2024-12-02 04:22:14,465 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-02 04:22:14,489 main DEBUG Took 0.022294 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 04:22:14,490 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 04:22:14,490 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 04:22:14,492 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 04:22:14,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,505 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 04:22:14,520 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,522 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,523 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,524 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,525 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,525 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,526 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,527 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,528 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,528 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,529 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,530 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,530 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,531 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-02 04:22:14,531 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,532 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,532 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,533 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,534 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,534 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,535 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,535 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,536 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,536 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 04:22:14,537 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,537 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 04:22:14,539 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 04:22:14,541 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 04:22:14,546 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 04:22:14,547 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
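For readers tracing the LoggerConfig entries above: the levels being built (for example org.apache.zookeeper=ERROR, org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG, and a root logger of "INFO,Console") come from the log4j2.properties bundled with hbase-logging. As a rough, hedged approximation only, the same levels could be declared programmatically with Log4j 2's ConfigurationBuilder; this sketch omits the custom HBaseTestAppender that the real configuration installs (see the appender construction just below) and is not the project's actual setup.

```java
// Hedged approximation of the logger levels recorded above, expressed with
// Log4j 2's programmatic ConfigurationBuilder API. The real test run uses the
// bundled log4j2.properties (plus an HBaseTestAppender), not this code.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class ApproxTestLogging {
  public static void configure() {
    ConfigurationBuilder<BuiltConfiguration> b =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Console appender writing to stderr, using the pattern shown for the test appender below.
    AppenderComponentBuilder console = b.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(b.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    b.add(console);

    // A subset of the per-logger levels listed in the createLoggers(...) entry above.
    b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
    b.add(b.newLogger("org.apache.hadoop", Level.WARN));
    b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    b.add(b.newLogger("org.apache.hadoop.metrics2.util.MBeans", Level.ERROR));

    // Root logger: INFO, routed to the Console appender ("INFO,Console" above).
    b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));

    Configurator.initialize(b.build());
  }
}
```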
2024-12-02 04:22:14,548 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 04:22:14,549 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 04:22:14,558 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 04:22:14,560 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 04:22:14,563 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 04:22:14,563 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 04:22:14,564 main DEBUG createAppenders(={Console}) 2024-12-02 04:22:14,564 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-12-02 04:22:14,565 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-02 04:22:14,565 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-12-02 04:22:14,566 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 04:22:14,566 main DEBUG OutputStream closed 2024-12-02 04:22:14,566 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 04:22:14,567 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 04:22:14,567 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-12-02 04:22:14,719 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 04:22:14,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 04:22:14,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 04:22:14,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 04:22:14,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 04:22:14,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 04:22:14,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 04:22:14,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 04:22:14,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 04:22:14,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 04:22:14,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 04:22:14,729 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 04:22:14,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 04:22:14,730 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 04:22:14,730 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 04:22:14,731 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 04:22:14,731 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 04:22:14,732 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 04:22:14,735 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 04:22:14,735 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-12-02 04:22:14,735 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 04:22:14,737 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-12-02T04:22:14,763 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-02 04:22:14,767 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 04:22:14,767 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02T04:22:15,378 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206 2024-12-02T04:22:15,379 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-12-02T04:22:15,380 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-12-02T04:22:15,424 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-02T04:22:15,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T04:22:15,668 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20, deleteOnExit=true 2024-12-02T04:22:15,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T04:22:15,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/test.cache.data in system properties and HBase conf 2024-12-02T04:22:15,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T04:22:15,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir in system properties and HBase conf 2024-12-02T04:22:15,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T04:22:15,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T04:22:15,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T04:22:15,782 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T04:22:15,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T04:22:15,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T04:22:15,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T04:22:15,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T04:22:15,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T04:22:15,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T04:22:15,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T04:22:15,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T04:22:15,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T04:22:15,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/nfs.dump.dir in system properties and HBase conf 2024-12-02T04:22:15,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir in system properties and HBase conf 2024-12-02T04:22:15,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T04:22:15,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T04:22:15,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T04:22:17,123 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T04:22:17,203 INFO [Time-limited test {}] log.Log(170): Logging initialized @3814ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T04:22:17,300 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:17,404 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:17,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:17,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:17,501 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T04:22:17,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:17,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:17,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:17,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5996a1b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-42955-hadoop-hdfs-3_4_1-tests_jar-_-any-11961536585802017999/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T04:22:17,883 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:42955} 2024-12-02T04:22:17,883 INFO [Time-limited test {}] server.Server(415): Started @4495ms 2024-12-02T04:22:18,469 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:18,476 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:18,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:18,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:18,478 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T04:22:18,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:18,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:18,587 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64986ec5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-39841-hadoop-hdfs-3_4_1-tests_jar-_-any-16925067410029554117/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T04:22:18,588 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:39841} 2024-12-02T04:22:18,588 INFO [Time-limited test {}] server.Server(415): Started @5200ms 2024-12-02T04:22:18,645 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T04:22:18,811 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:18,818 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:18,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:18,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:18,825 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T04:22:18,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:18,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:18,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b9a68a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-42841-hadoop-hdfs-3_4_1-tests_jar-_-any-9584996597847287475/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T04:22:18,963 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:42841} 2024-12-02T04:22:18,963 INFO [Time-limited test {}] server.Server(415): Started @5575ms 2024-12-02T04:22:18,965 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T04:22:19,030 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:19,036 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:19,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:19,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:19,049 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T04:22:19,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:19,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:19,158 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@367f3488{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-37197-hadoop-hdfs-3_4_1-tests_jar-_-any-7978483555562734634/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T04:22:19,159 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37197} 2024-12-02T04:22:19,159 INFO [Time-limited test {}] server.Server(415): Started @5771ms 2024-12-02T04:22:19,162 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
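The HBaseTestingUtil(751) lines earlier all follow one pattern: each named Hadoop/YARN/DFS directory property is pointed at a same-named subdirectory of the per-test data directory, both as a JVM system property and in the HBase/Hadoop Configuration. A minimal, hedged sketch of that pattern follows; the class, method, and variable names here are illustrative stand-ins, not the utility's real internals.

```java
// Hedged sketch of the property-redirection pattern recorded by the
// HBaseTestingUtil(751) entries above: each directory property is mapped to
// <per-test data dir>/<property name>, in system properties and in the conf.
import java.io.File;
import org.apache.hadoop.conf.Configuration;

public final class TestDirRedirectSketch {
  // Illustrative helper, not HBaseTestingUtil's actual method.
  static void redirect(Configuration conf, File baseTestDir, String property) {
    String dir = new File(baseTestDir, property).getAbsolutePath();
    System.setProperty(property, dir);
    conf.set(property, dir);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    File base = new File("target/test-data/<test-uuid>"); // placeholder for the real test-data path
    for (String p : new String[] {
        "hadoop.tmp.dir", "hadoop.log.dir",
        "mapreduce.cluster.local.dir", "yarn.nodemanager.log-dirs",
        "dfs.journalnode.edits.dir", "nfs.dump.dir" }) {
      redirect(conf, base, p);
    }
  }
}
```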
2024-12-02T04:22:20,344 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565/current, will proceed with Du for space computation calculation, 2024-12-02T04:22:20,344 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565/current, will proceed with Du for space computation calculation, 2024-12-02T04:22:20,344 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565/current, will proceed with Du for space computation calculation, 2024-12-02T04:22:20,344 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565/current, will proceed with Du for space computation calculation, 2024-12-02T04:22:20,467 WARN [Thread-136 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565/current, will proceed with Du for space computation calculation, 2024-12-02T04:22:20,472 WARN [Thread-137 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565/current, will proceed with Du for space computation calculation, 2024-12-02T04:22:20,475 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T04:22:20,487 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T04:22:20,517 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T04:22:20,552 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x807d0b6a35655a2b with lease ID 0x7cd4743a47b7979a: Processing first storage report for DS-0edf3608-9fbe-4207-8f07-26a6afe16934 from datanode DatanodeRegistration(127.0.0.1:38235, datanodeUuid=41803efc-33d8-4437-b70d-294066f67bca, infoPort=37067, infoSecurePort=0, ipcPort=45473, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565) 2024-12-02T04:22:20,553 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x807d0b6a35655a2b with lease ID 0x7cd4743a47b7979a: from storage DS-0edf3608-9fbe-4207-8f07-26a6afe16934 node DatanodeRegistration(127.0.0.1:38235, datanodeUuid=41803efc-33d8-4437-b70d-294066f67bca, infoPort=37067, infoSecurePort=0, ipcPort=45473, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-02T04:22:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcbf7eab3d380b4b3 with lease ID 0x7cd4743a47b7979b: Processing first storage report for DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c from datanode DatanodeRegistration(127.0.0.1:40371, datanodeUuid=b92c3561-0c0c-4dd7-be73-ec7390f1dfa5, infoPort=36609, infoSecurePort=0, ipcPort=46489, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565) 2024-12-02T04:22:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcbf7eab3d380b4b3 with lease ID 0x7cd4743a47b7979b: from storage DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c node DatanodeRegistration(127.0.0.1:40371, datanodeUuid=b92c3561-0c0c-4dd7-be73-ec7390f1dfa5, infoPort=36609, infoSecurePort=0, ipcPort=46489, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T04:22:20,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x971f41aea7ded772 with lease ID 0x7cd4743a47b7979c: Processing first storage report for DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8 from datanode DatanodeRegistration(127.0.0.1:38471, datanodeUuid=c925c3c1-f5f6-4ac2-969a-f22abf49463f, infoPort=42627, infoSecurePort=0, ipcPort=41555, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565) 2024-12-02T04:22:20,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x971f41aea7ded772 with lease ID 0x7cd4743a47b7979c: from storage DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8 node DatanodeRegistration(127.0.0.1:38471, datanodeUuid=c925c3c1-f5f6-4ac2-969a-f22abf49463f, infoPort=42627, infoSecurePort=0, ipcPort=41555, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T04:22:20,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x807d0b6a35655a2b with lease ID 0x7cd4743a47b7979a: Processing first storage report for DS-19c4ce9e-83cd-4bb4-941e-facb2eb2cfa6 from datanode DatanodeRegistration(127.0.0.1:38235, datanodeUuid=41803efc-33d8-4437-b70d-294066f67bca, infoPort=37067, infoSecurePort=0, ipcPort=45473, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565) 2024-12-02T04:22:20,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x807d0b6a35655a2b with lease ID 0x7cd4743a47b7979a: from storage DS-19c4ce9e-83cd-4bb4-941e-facb2eb2cfa6 node DatanodeRegistration(127.0.0.1:38235, datanodeUuid=41803efc-33d8-4437-b70d-294066f67bca, infoPort=37067, infoSecurePort=0, ipcPort=45473, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T04:22:20,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcbf7eab3d380b4b3 with lease ID 0x7cd4743a47b7979b: Processing first storage report for DS-8327edea-11e2-446b-96ce-8e72a8468ba9 from datanode DatanodeRegistration(127.0.0.1:40371, datanodeUuid=b92c3561-0c0c-4dd7-be73-ec7390f1dfa5, infoPort=36609, infoSecurePort=0, ipcPort=46489, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565) 2024-12-02T04:22:20,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcbf7eab3d380b4b3 with lease ID 0x7cd4743a47b7979b: from storage DS-8327edea-11e2-446b-96ce-8e72a8468ba9 node DatanodeRegistration(127.0.0.1:40371, datanodeUuid=b92c3561-0c0c-4dd7-be73-ec7390f1dfa5, infoPort=36609, infoSecurePort=0, ipcPort=46489, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T04:22:20,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x971f41aea7ded772 with lease ID 0x7cd4743a47b7979c: Processing first storage report for DS-e4cad933-009f-41f8-9ef9-a1cab8ad3155 from datanode DatanodeRegistration(127.0.0.1:38471, datanodeUuid=c925c3c1-f5f6-4ac2-969a-f22abf49463f, infoPort=42627, infoSecurePort=0, ipcPort=41555, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565) 2024-12-02T04:22:20,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x971f41aea7ded772 with lease ID 0x7cd4743a47b7979c: from storage DS-e4cad933-009f-41f8-9ef9-a1cab8ad3155 node DatanodeRegistration(127.0.0.1:38471, datanodeUuid=c925c3c1-f5f6-4ac2-969a-f22abf49463f, infoPort=42627, infoSecurePort=0, ipcPort=41555, storageInfo=lv=-57;cid=testClusterID;nsid=1483613293;c=1733113336565), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T04:22:20,564 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206 2024-12-02T04:22:20,670 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/zookeeper_0, clientPort=62533, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 
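The MiniZooKeeperCluster entry above reports the standalone ZooKeeper this cluster will use (clientPort=62533, maxClientCnxns=300, session timeouts 4000-40000 ms); the harness's own check, noted in the next entry, is a 'stat' against that port. As a hedged illustration only, and not something the test itself does, a bare ZooKeeper client could be pointed at the same port to confirm it is serving:

```java
// Hedged illustration: connect a plain ZooKeeper client to the
// MiniZooKeeperCluster client port reported above (clientPort=62533) and
// list the root znode's children. Port and timeout come from that entry.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class ZkSmokeCheck {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62533", 40000, (WatchedEvent e) -> {
      if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    System.out.println("children of /: " + zk.getChildren("/", false));
    zk.close();
  }
}
```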
2024-12-02T04:22:20,687 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62533 2024-12-02T04:22:20,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:20,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:21,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741825_1001 (size=7) 2024-12-02T04:22:21,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741825_1001 (size=7) 2024-12-02T04:22:21,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741825_1001 (size=7) 2024-12-02T04:22:21,101 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd with version=8 2024-12-02T04:22:21,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/hbase-staging 2024-12-02T04:22:21,208 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T04:22:21,469 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T04:22:21,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:21,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:21,489 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T04:22:21,489 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:21,489 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T04:22:21,617 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T04:22:21,680 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T04:22:21,695 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T04:22:21,699 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T04:22:21,735 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 116403 (auto-detected) 2024-12-02T04:22:21,737 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-02T04:22:21,762 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34209 2024-12-02T04:22:21,784 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34209 connecting to ZooKeeper ensemble=127.0.0.1:62533 2024-12-02T04:22:21,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:342090x0, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T04:22:21,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34209-0x101959075a30000 connected 2024-12-02T04:22:21,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:21,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:21,964 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:22:21,968 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd, hbase.cluster.distributed=false 2024-12-02T04:22:22,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T04:22:22,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34209 2024-12-02T04:22:22,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34209 2024-12-02T04:22:22,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34209 2024-12-02T04:22:22,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34209 2024-12-02T04:22:22,015 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34209 2024-12-02T04:22:22,126 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T04:22:22,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,128 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): 
priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T04:22:22,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T04:22:22,131 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T04:22:22,134 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T04:22:22,135 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41643 2024-12-02T04:22:22,137 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41643 connecting to ZooKeeper ensemble=127.0.0.1:62533 2024-12-02T04:22:22,138 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416430x0, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T04:22:22,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416430x0, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:22:22,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41643-0x101959075a30001 connected 2024-12-02T04:22:22,182 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T04:22:22,187 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-02T04:22:22,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T04:22:22,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T04:22:22,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41643 2024-12-02T04:22:22,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41643 2024-12-02T04:22:22,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41643 2024-12-02T04:22:22,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=41643 2024-12-02T04:22:22,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41643 2024-12-02T04:22:22,217 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T04:22:22,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,218 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T04:22:22,218 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,218 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T04:22:22,218 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T04:22:22,218 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T04:22:22,219 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37413 2024-12-02T04:22:22,221 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37413 connecting to ZooKeeper ensemble=127.0.0.1:62533 2024-12-02T04:22:22,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374130x0, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T04:22:22,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:374130x0, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:22:22,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37413-0x101959075a30002 connected 2024-12-02T04:22:22,234 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T04:22:22,235 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-02T04:22:22,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
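By this point the master RPC server (port 34209) and two region server RPC servers (ports 41643 and 37413) have bound and set their ZooKeeper watchers; a third region server follows below. For orientation, a hedged sketch of how test code typically reaches such a mini cluster through the ordinary HBase client API; 'conf' is assumed to be the cluster's Configuration (carrying the 127.0.0.1:62533 quorum above), and none of this is taken from the test source.

```java
// Hedged, illustrative sketch only: reaching a running mini cluster through
// the standard HBase client API. The Configuration passed in is assumed to
// carry the ZooKeeper quorum/client port used by the servers logged here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class MiniClusterClientSketch {
  public static void printLiveServers(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists the region servers that have reported in (the ones binding above).
      System.out.println(admin.getClusterMetrics().getLiveServerMetrics().keySet());
    }
  }
}
```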
2024-12-02T04:22:22,238 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T04:22:22,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37413 2024-12-02T04:22:22,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37413 2024-12-02T04:22:22,242 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37413 2024-12-02T04:22:22,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37413 2024-12-02T04:22:22,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37413 2024-12-02T04:22:22,259 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T04:22:22,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,259 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T04:22:22,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T04:22:22,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T04:22:22,259 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T04:22:22,260 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T04:22:22,261 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34291 2024-12-02T04:22:22,262 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34291 connecting to ZooKeeper ensemble=127.0.0.1:62533 2024-12-02T04:22:22,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342910x0, quorum=127.0.0.1:62533, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T04:22:22,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342910x0, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:22:22,276 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34291-0x101959075a30003 connected 2024-12-02T04:22:22,276 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T04:22:22,277 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-02T04:22:22,278 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T04:22:22,279 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T04:22:22,283 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34291 2024-12-02T04:22:22,283 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34291 2024-12-02T04:22:22,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34291 2024-12-02T04:22:22,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34291 2024-12-02T04:22:22,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34291 2024-12-02T04:22:22,304 DEBUG [M:0;e2eaa0f11f7e:34209 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2eaa0f11f7e:34209 2024-12-02T04:22:22,305 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:22,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,320 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on existing 
znode=/hbase/backup-masters/e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:22,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T04:22:22,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T04:22:22,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T04:22:22,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,366 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T04:22:22,369 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2eaa0f11f7e,34209,1733113341303 from backup master directory 2024-12-02T04:22:22,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T04:22:22,388 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T04:22:22,388 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:22,391 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T04:22:22,395 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T04:22:22,473 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/hbase.id] with ID: dbdb8f4d-d324-4e0c-81d0-e4860d09061c 2024-12-02T04:22:22,473 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.tmp/hbase.id 2024-12-02T04:22:22,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741826_1002 (size=42) 2024-12-02T04:22:22,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741826_1002 (size=42) 2024-12-02T04:22:22,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741826_1002 (size=42) 2024-12-02T04:22:22,497 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.tmp/hbase.id]:[hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/hbase.id] 2024-12-02T04:22:22,566 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:22,572 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T04:22:22,592 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 
2024-12-02T04:22:22,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:22,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741827_1003 (size=196) 2024-12-02T04:22:22,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741827_1003 (size=196) 2024-12-02T04:22:22,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741827_1003 (size=196) 2024-12-02T04:22:22,676 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:22:22,678 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T04:22:22,694 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
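The NoSuchMethodException above is expected rather than a failure: per the preceding DEBUG line, the SASL helper only probes for a pre-HDFS-12396 DFSClient method and falls back when it is absent. A generic sketch of that reflection feature-detection pattern (illustrative only, not the HBase source):

    public class MethodProbe {
      // Returns true if the class declares the named method; typical version feature-detection.
      static boolean hasDeclaredMethod(Class<?> cls, String name, Class<?>... paramTypes) {
        try {
          cls.getDeclaredMethod(name, paramTypes);
          return true;
        } catch (NoSuchMethodException e) {
          return false; // method missing: take the newer code path instead
        }
      }

      public static void main(String[] args) {
        System.out.println(hasDeclaredMethod(String.class, "isBlank")); // true on JDK 11+
      }
    }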
2024-12-02T04:22:22,700 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T04:22:22,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741828_1004 (size=1189) 2024-12-02T04:22:22,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741828_1004 (size=1189) 2024-12-02T04:22:22,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741828_1004 (size=1189) 2024-12-02T04:22:22,759 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/data/master/store 2024-12-02T04:22:22,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741829_1005 (size=34) 2024-12-02T04:22:22,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741829_1005 (size=34) 2024-12-02T04:22:22,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741829_1005 (size=34) 2024-12-02T04:22:22,790 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
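As the StoreHotnessProtector message above notes, the protector is off because hbase.region.store.parallel.put.limit defaults to 0. A minimal sketch of enabling it, following the same Configuration pattern as the earlier sketch (property name taken from the log line, value illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the protector, capping concurrent puts per store.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }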
2024-12-02T04:22:22,794 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:22,796 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T04:22:22,797 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T04:22:22,797 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T04:22:22,799 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T04:22:22,799 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T04:22:22,799 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T04:22:22,801 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733113342796Disabling compacts and flushes for region at 1733113342796Disabling writes for close at 1733113342799 (+3 ms)Writing region close event to WAL at 1733113342799Closed at 1733113342799 2024-12-02T04:22:22,803 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/data/master/store/.initializing 2024-12-02T04:22:22,804 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:22,814 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T04:22:22,831 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C34209%2C1733113341303, suffix=, logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303, archiveDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/oldWALs, maxLogs=10 2024-12-02T04:22:22,860 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836, exclude list is [], retry=0 2024-12-02T04:22:22,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c,DISK] 
2024-12-02T04:22:22,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38471,DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8,DISK] 2024-12-02T04:22:22,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38235,DS-0edf3608-9fbe-4207-8f07-26a6afe16934,DISK] 2024-12-02T04:22:22,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-02T04:22:22,935 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 2024-12-02T04:22:22,936 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42627:42627),(127.0.0.1/127.0.0.1:36609:36609),(127.0.0.1/127.0.0.1:37067:37067)] 2024-12-02T04:22:22,937 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T04:22:22,938 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:22,943 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:22,944 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:22,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T04:22:23,020 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:23,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T04:22:23,027 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:23,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T04:22:23,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:23,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T04:22:23,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:23,043 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,048 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,049 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,056 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,056 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,060 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
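The FlushLargeStoresPolicy message above falls back to memstore-flush-size divided by the number of families because the table descriptor carries no explicit lower bound. A sketch of setting that bound per table, assuming the standard TableDescriptorBuilder API (table name, family and 16 MB value are illustrative; the key is the one named in the log message):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
      public static void main(String[] args) {
        // Sketch: attach the per-column-family flush lower bound to a table descriptor.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }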
2024-12-02T04:22:23,064 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T04:22:23,069 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:22:23,070 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70806452, jitterRate=0.05509835481643677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T04:22:23,075 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733113342966Initializing all the Stores at 1733113342969 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113342969Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113342970 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113342971 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113342971Cleaning up temporary data from old regions at 1733113343057 (+86 ms)Region opened successfully at 1733113343075 (+18 ms) 2024-12-02T04:22:23,077 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T04:22:23,107 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c013b9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T04:22:23,136 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-02T04:22:23,146 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T04:22:23,146 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T04:22:23,150 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T04:22:23,152 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T04:22:23,157 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-02T04:22:23,157 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T04:22:23,183 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T04:22:23,191 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T04:22:23,208 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T04:22:23,212 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T04:22:23,214 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T04:22:23,224 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T04:22:23,228 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T04:22:23,232 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T04:22:23,241 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T04:22:23,243 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T04:22:23,249 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T04:22:23,266 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T04:22:23,274 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,285 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2eaa0f11f7e,34209,1733113341303, sessionid=0x101959075a30000, setting cluster-up flag (Was=false) 2024-12-02T04:22:23,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-02T04:22:23,333 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T04:22:23,335 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:23,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:23,383 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T04:22:23,385 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:23,391 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T04:22:23,419 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-02T04:22:23,423 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:22:23,423 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-02T04:22:23,473 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T04:22:23,484 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T04:22:23,491 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(746): ClusterId : dbdb8f4d-d324-4e0c-81d0-e4860d09061c 2024-12-02T04:22:23,491 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(746): ClusterId : dbdb8f4d-d324-4e0c-81d0-e4860d09061c 2024-12-02T04:22:23,491 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(746): ClusterId : dbdb8f4d-d324-4e0c-81d0-e4860d09061c 2024-12-02T04:22:23,494 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T04:22:23,495 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T04:22:23,495 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T04:22:23,495 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T04:22:23,500 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2eaa0f11f7e,34209,1733113341303 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T04:22:23,536 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T04:22:23,536 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T04:22:23,536 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T04:22:23,536 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T04:22:23,536 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T04:22:23,536 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T04:22:23,536 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T04:22:23,536 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2eaa0f11f7e:0, 
corePoolSize=5, maxPoolSize=5 2024-12-02T04:22:23,537 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T04:22:23,537 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T04:22:23,537 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2eaa0f11f7e:0, corePoolSize=10, maxPoolSize=10 2024-12-02T04:22:23,537 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,537 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T04:22:23,538 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,540 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733113373540 2024-12-02T04:22:23,542 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T04:22:23,544 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T04:22:23,545 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T04:22:23,546 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T04:22:23,548 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T04:22:23,548 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T04:22:23,549 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T04:22:23,549 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T04:22:23,550 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T04:22:23,551 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T04:22:23,551 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T04:22:23,551 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46b0598c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T04:22:23,551 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ad7b9b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T04:22:23,551 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@230270c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T04:22:23,553 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,553 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T04:22:23,551 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T04:22:23,565 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2eaa0f11f7e:41643 2024-12-02T04:22:23,568 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T04:22:23,569 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T04:22:23,569 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T04:22:23,569 INFO [RS:0;e2eaa0f11f7e:41643 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:22:23,569 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T04:22:23,572 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,34209,1733113341303 with port=41643, startcode=1733113342088 2024-12-02T04:22:23,573 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;e2eaa0f11f7e:34291 2024-12-02T04:22:23,574 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T04:22:23,574 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T04:22:23,574 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T04:22:23,574 INFO [RS:2;e2eaa0f11f7e:34291 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:22:23,574 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T04:22:23,576 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,34209,1733113341303 with port=34291, startcode=1733113342258 2024-12-02T04:22:23,576 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T04:22:23,579 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T04:22:23,579 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T04:22:23,582 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T04:22:23,583 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T04:22:23,585 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T04:22:23,585 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T04:22:23,586 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733113343584,5,FailOnTimeoutGroup] 2024-12-02T04:22:23,592 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e2eaa0f11f7e:37413 2024-12-02T04:22:23,592 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T04:22:23,592 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T04:22:23,593 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T04:22:23,593 INFO [RS:1;e2eaa0f11f7e:37413 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:22:23,593 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T04:22:23,596 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733113343586,5,FailOnTimeoutGroup] 2024-12-02T04:22:23,596 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,596 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
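The master log above notes that reopening regions with a very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of flipping that switch on a Configuration before cluster start; the threshold of 3 is an arbitrary example, not a recommendation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Property named in the log line above; any value > 0 enables reopening
    // regions whose store file reference count stays above that threshold.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}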
2024-12-02T04:22:23,596 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,34209,1733113341303 with port=37413, startcode=1733113342217 2024-12-02T04:22:23,597 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T04:22:23,598 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,598 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741831_1007 (size=1321) 2024-12-02T04:22:23,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741831_1007 (size=1321) 2024-12-02T04:22:23,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741831_1007 (size=1321) 2024-12-02T04:22:23,633 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T04:22:23,634 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:23,645 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40043, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 
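At this point the master's RegionServerStatusService is accepting connections from the three regionservers (startcodes 1733113342088, 1733113342217 and 1733113342258). As a hypothetical client-side counterpart, the sketch below lists the live regionservers a running cluster reports through the public Admin/ClusterMetrics API; the ZooKeeper quorum values are taken from this log (127.0.0.1:62533) and would differ elsewhere:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // quorum host from this log
    conf.set("hbase.zookeeper.property.clientPort", "62533");   // quorum port from this log
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Live-server map is keyed by ServerName (host,port,startcode), the same
      // triples the master registers in the log entries above.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        System.out.println("live regionserver: " + sn);
      }
    }
  }
}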
2024-12-02T04:22:23,645 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44821, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T04:22:23,647 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57989, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T04:22:23,653 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:23,656 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:23,671 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:23,671 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:23,675 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:23,675 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34539 2024-12-02T04:22:23,675 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T04:22:23,678 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:23,678 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:23,679 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:23,679 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34539 2024-12-02T04:22:23,679 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T04:22:23,683 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:23,683 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34539 2024-12-02T04:22:23,683 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T04:22:23,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741832_1008 (size=32) 2024-12-02T04:22:23,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741832_1008 (size=32) 2024-12-02T04:22:23,690 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741832_1008 (size=32) 2024-12-02T04:22:23,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:23,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T04:22:23,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T04:22:23,701 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:23,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T04:22:23,706 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T04:22:23,706 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:23,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T04:22:23,709 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T04:22:23,710 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:23,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T04:22:23,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T04:22:23,714 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:23,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:23,716 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T04:22:23,717 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740 2024-12-02T04:22:23,719 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740 2024-12-02T04:22:23,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T04:22:23,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T04:22:23,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T04:22:23,723 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T04:22:23,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T04:22:23,733 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] zookeeper.ZKUtil(111): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:23,733 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] zookeeper.ZKUtil(111): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:23,733 WARN [RS:1;e2eaa0f11f7e:37413 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T04:22:23,733 WARN [RS:0;e2eaa0f11f7e:41643 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T04:22:23,733 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] zookeeper.ZKUtil(111): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:23,733 INFO [RS:0;e2eaa0f11f7e:41643 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T04:22:23,733 INFO [RS:1;e2eaa0f11f7e:37413 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T04:22:23,733 WARN [RS:2;e2eaa0f11f7e:34291 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
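Each regionserver above instantiates a WALProvider of type org.apache.hadoop.hbase.wal.AsyncFSWALProvider (the HBASE_ZNODE_FILE warning is separate and harmless in a test context). A minimal sketch of selecting that provider explicitly, assuming the standard hbase.wal.provider key and its asyncfs value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 'asyncfs' maps to AsyncFSWALProvider, the provider named in the log;
    // 'filesystem' and 'multiwal' are the other commonly documented choices.
    conf.set("hbase.wal.provider", "asyncfs");
    System.out.println(conf.get("hbase.wal.provider"));
  }
}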
2024-12-02T04:22:23,733 INFO [RS:2;e2eaa0f11f7e:34291 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T04:22:23,734 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:23,734 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:23,734 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:23,740 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:22:23,741 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,34291,1733113342258] 2024-12-02T04:22:23,741 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,41643,1733113342088] 2024-12-02T04:22:23,741 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,37413,1733113342217] 2024-12-02T04:22:23,742 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70733258, jitterRate=0.05400767922401428}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T04:22:23,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733113343695Initializing all the Stores at 1733113343697 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113343698 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113343698Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113343698Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113343698Cleaning up temporary data from old regions at 1733113343722 (+24 ms)Region opened successfully at 1733113343745 (+23 ms) 2024-12-02T04:22:23,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T04:22:23,746 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T04:22:23,746 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T04:22:23,746 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T04:22:23,746 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T04:22:23,755 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T04:22:23,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733113343745Disabling compacts and flushes for region at 1733113343745Disabling writes for close at 1733113343746 (+1 ms)Writing region close event to WAL at 1733113343755 (+9 ms)Closed at 1733113343755 2024-12-02T04:22:23,759 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T04:22:23,760 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T04:22:23,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T04:22:23,774 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T04:22:23,774 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T04:22:23,776 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T04:22:23,777 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T04:22:23,781 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T04:22:23,792 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T04:22:23,795 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T04:22:23,797 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T04:22:23,801 INFO [RS:0;e2eaa0f11f7e:41643 {}] 
throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T04:22:23,801 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,801 INFO [RS:1;e2eaa0f11f7e:37413 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T04:22:23,802 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,802 INFO [RS:2;e2eaa0f11f7e:34291 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T04:22:23,802 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,807 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T04:22:23,807 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T04:22:23,808 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T04:22:23,814 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T04:22:23,814 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T04:22:23,814 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T04:22:23,817 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,817 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,817 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
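The PressureAwareCompactionThroughputController lines above report an upper bound of 100 MB/s and a lower bound of 50 MB/s with a 60000 ms tuning period. A minimal sketch of overriding those bounds; the two hbase.hstore.compaction.throughput.* keys are assumed to be the ones this controller reads, and the byte values are purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Bounds are expressed in bytes per second: 200 MB/s upper, 100 MB/s lower.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 200L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 100L * 1024 * 1024);
    System.out.println(conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0L));
  }
}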
2024-12-02T04:22:23,817 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,817 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T04:22:23,818 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T04:22:23,818 DEBUG 
[RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,818 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,819 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,819 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,819 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,819 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,819 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting 
executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T04:22:23,819 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T04:22:23,819 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T04:22:23,819 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T04:22:23,819 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T04:22:23,819 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T04:22:23,819 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T04:22:23,827 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,828 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,828 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,828 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,828 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,828 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34291,1733113342258-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T04:22:23,830 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,830 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,830 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,830 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,831 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,831 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,41643,1733113342088-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T04:22:23,844 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,845 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,845 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,845 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,845 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,845 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,37413,1733113342217-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T04:22:23,890 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T04:22:23,890 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T04:22:23,893 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34291,1733113342258-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,899 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,899 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.Replication(171): e2eaa0f11f7e,34291,1733113342258 started 2024-12-02T04:22:23,901 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,41643,1733113342088-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,901 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,902 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.Replication(171): e2eaa0f11f7e,41643,1733113342088 started 2024-12-02T04:22:23,927 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T04:22:23,928 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,37413,1733113342217-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,928 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,928 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.Replication(171): e2eaa0f11f7e,37413,1733113342217 started 2024-12-02T04:22:23,932 WARN [e2eaa0f11f7e:34209 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T04:22:23,933 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
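The HeapMemoryManager lines above start with tuneOn=false, and the MemStoreFlusher lines earlier in this section derive globalMemStoreLimit=880 M with a low-water mark of 836 M from the regionserver heap. A minimal sketch of adjusting that global memstore fraction, assuming the standard hbase.regionserver.global.memstore.size key (commonly 0.4 of heap by default; 0.35 below is only an example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the regionserver heap reserved for all memstores combined;
    // the absolute 880 M / 836 M figures in the log are derived from this fraction.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.35f);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
  }
}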
2024-12-02T04:22:23,933 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,41643,1733113342088, RpcServer on e2eaa0f11f7e/172.17.0.2:41643, sessionid=0x101959075a30001 2024-12-02T04:22:23,934 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,934 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,34291,1733113342258, RpcServer on e2eaa0f11f7e/172.17.0.2:34291, sessionid=0x101959075a30003 2024-12-02T04:22:23,934 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T04:22:23,934 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:23,934 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T04:22:23,934 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:23,934 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,41643,1733113342088' 2024-12-02T04:22:23,934 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,34291,1733113342258' 2024-12-02T04:22:23,934 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T04:22:23,934 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T04:22:23,935 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T04:22:23,935 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T04:22:23,936 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T04:22:23,936 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T04:22:23,936 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T04:22:23,936 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T04:22:23,936 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:23,936 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:23,936 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,41643,1733113342088' 2024-12-02T04:22:23,936 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,34291,1733113342258' 2024-12-02T04:22:23,936 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.ZKProcedureMemberRpcs(134): 
Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T04:22:23,936 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T04:22:23,937 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T04:22:23,937 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T04:22:23,937 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T04:22:23,938 INFO [RS:0;e2eaa0f11f7e:41643 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T04:22:23,938 INFO [RS:0;e2eaa0f11f7e:41643 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T04:22:23,938 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T04:22:23,938 INFO [RS:2;e2eaa0f11f7e:34291 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T04:22:23,938 INFO [RS:2;e2eaa0f11f7e:34291 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T04:22:23,954 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:23,955 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,37413,1733113342217, RpcServer on e2eaa0f11f7e/172.17.0.2:37413, sessionid=0x101959075a30002 2024-12-02T04:22:23,955 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T04:22:23,955 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:23,955 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,37413,1733113342217' 2024-12-02T04:22:23,955 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T04:22:23,956 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T04:22:23,957 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T04:22:23,957 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T04:22:23,957 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:23,957 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,37413,1733113342217' 2024-12-02T04:22:23,957 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T04:22:23,958 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-12-02T04:22:23,958 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T04:22:23,958 INFO [RS:1;e2eaa0f11f7e:37413 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T04:22:23,959 INFO [RS:1;e2eaa0f11f7e:37413 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T04:22:24,042 INFO [RS:2;e2eaa0f11f7e:34291 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T04:22:24,042 INFO [RS:0;e2eaa0f11f7e:41643 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T04:22:24,046 INFO [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C34291%2C1733113342258, suffix=, logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258, archiveDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs, maxLogs=32 2024-12-02T04:22:24,051 INFO [RS:0;e2eaa0f11f7e:41643 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C41643%2C1733113342088, suffix=, logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,41643,1733113342088, archiveDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs, maxLogs=32 2024-12-02T04:22:24,060 INFO [RS:1;e2eaa0f11f7e:37413 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T04:22:24,062 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258/e2eaa0f11f7e%2C34291%2C1733113342258.1733113344048, exclude list is [], retry=0 2024-12-02T04:22:24,064 INFO [RS:1;e2eaa0f11f7e:37413 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C37413%2C1733113342217, suffix=, logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,37413,1733113342217, archiveDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs, maxLogs=32 2024-12-02T04:22:24,074 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,41643,1733113342088/e2eaa0f11f7e%2C41643%2C1733113342088.1733113344053, exclude list is [], retry=0 2024-12-02T04:22:24,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c,DISK] 2024-12-02T04:22:24,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38471,DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8,DISK] 2024-12-02T04:22:24,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38235,DS-0edf3608-9fbe-4207-8f07-26a6afe16934,DISK] 2024-12-02T04:22:24,080 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,37413,1733113342217/e2eaa0f11f7e%2C37413%2C1733113342217.1733113344065, exclude list is [], retry=0 2024-12-02T04:22:24,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38235,DS-0edf3608-9fbe-4207-8f07-26a6afe16934,DISK] 2024-12-02T04:22:24,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38471,DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8,DISK] 2024-12-02T04:22:24,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c,DISK] 2024-12-02T04:22:24,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38235,DS-0edf3608-9fbe-4207-8f07-26a6afe16934,DISK] 2024-12-02T04:22:24,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c,DISK] 2024-12-02T04:22:24,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38471,DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8,DISK] 2024-12-02T04:22:24,131 INFO [RS:0;e2eaa0f11f7e:41643 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,41643,1733113342088/e2eaa0f11f7e%2C41643%2C1733113342088.1733113344053 2024-12-02T04:22:24,131 INFO [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258/e2eaa0f11f7e%2C34291%2C1733113342258.1733113344048 2024-12-02T04:22:24,132 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37067:37067),(127.0.0.1/127.0.0.1:42627:42627),(127.0.0.1/127.0.0.1:36609:36609)] 2024-12-02T04:22:24,134 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36609:36609),(127.0.0.1/127.0.0.1:42627:42627),(127.0.0.1/127.0.0.1:37067:37067)] 2024-12-02T04:22:24,143 INFO [RS:1;e2eaa0f11f7e:37413 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,37413,1733113342217/e2eaa0f11f7e%2C37413%2C1733113342217.1733113344065 2024-12-02T04:22:24,144 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37067:37067),(127.0.0.1/127.0.0.1:36609:36609),(127.0.0.1/127.0.0.1:42627:42627)] 2024-12-02T04:22:24,184 DEBUG [e2eaa0f11f7e:34209 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-02T04:22:24,192 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:22:24,200 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:22:24,200 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:22:24,200 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:22:24,200 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:22:24,200 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:22:24,200 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:22:24,200 INFO [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:22:24,201 INFO [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:22:24,201 INFO [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:22:24,201 DEBUG [e2eaa0f11f7e:34209 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:22:24,207 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:24,213 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2eaa0f11f7e,34291,1733113342258, state=OPENING 2024-12-02T04:22:24,224 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T04:22:24,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:24,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:24,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:24,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:24,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,241 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,243 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T04:22:24,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:22:24,421 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T04:22:24,424 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54719, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:22:24,437 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T04:22:24,437 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T04:22:24,438 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-02T04:22:24,440 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C34291%2C1733113342258.meta, suffix=.meta, logDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258, archiveDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs, maxLogs=32 2024-12-02T04:22:24,455 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258/e2eaa0f11f7e%2C34291%2C1733113342258.meta.1733113344442.meta, exclude list is [], retry=0 2024-12-02T04:22:24,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38235,DS-0edf3608-9fbe-4207-8f07-26a6afe16934,DISK] 2024-12-02T04:22:24,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-b21aa5e9-73e2-4a7b-9584-393adaf4dd7c,DISK] 2024-12-02T04:22:24,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38471,DS-d67d38bf-5370-4767-b1b8-ce4d780f7ba8,DISK] 2024-12-02T04:22:24,489 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/WALs/e2eaa0f11f7e,34291,1733113342258/e2eaa0f11f7e%2C34291%2C1733113342258.meta.1733113344442.meta 2024-12-02T04:22:24,490 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36609:36609),(127.0.0.1/127.0.0.1:37067:37067),(127.0.0.1/127.0.0.1:42627:42627)] 2024-12-02T04:22:24,491 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T04:22:24,492 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-02T04:22:24,493 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:22:24,495 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T04:22:24,497 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T04:22:24,498 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T04:22:24,509 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T04:22:24,510 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:24,510 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T04:22:24,510 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T04:22:24,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T04:22:24,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T04:22:24,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:24,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:24,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T04:22:24,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T04:22:24,550 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:24,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:24,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T04:22:24,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T04:22:24,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:24,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T04:22:24,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T04:22:24,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T04:22:24,574 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:24,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T04:22:24,581 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T04:22:24,583 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740 2024-12-02T04:22:24,591 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740 2024-12-02T04:22:24,602 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T04:22:24,602 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T04:22:24,607 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T04:22:24,612 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T04:22:24,615 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59793600, jitterRate=-0.10900592803955078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T04:22:24,615 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T04:22:24,619 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733113344513Writing region info on filesystem at 1733113344513Initializing all the Stores at 1733113344518 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113344518Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113344530 (+12 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113344531 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113344531Cleaning up temporary data from old regions at 1733113344602 (+71 ms)Running coprocessor post-open hooks at 1733113344615 (+13 ms)Region opened successfully at 1733113344619 (+4 ms) 2024-12-02T04:22:24,643 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733113344411 2024-12-02T04:22:24,658 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T04:22:24,659 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T04:22:24,663 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:24,668 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2eaa0f11f7e,34291,1733113342258, state=OPEN 2024-12-02T04:22:24,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T04:22:24,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T04:22:24,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T04:22:24,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T04:22:24,683 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,683 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,683 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,684 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:24,685 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T04:22:24,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T04:22:24,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,34291,1733113342258 in 440 msec 2024-12-02T04:22:24,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T04:22:24,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 938 msec 2024-12-02T04:22:24,716 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T04:22:24,716 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T04:22:24,737 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:24,738 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:24,760 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:24,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52435, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:24,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4150 sec 2024-12-02T04:22:24,850 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733113344850, completionTime=-1 2024-12-02T04:22:24,856 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-02T04:22:24,857 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-02T04:22:24,909 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-02T04:22:24,909 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733113404909 2024-12-02T04:22:24,910 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733113464910 2024-12-02T04:22:24,910 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 53 msec 2024-12-02T04:22:24,912 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:22:24,926 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34209,1733113341303-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:24,927 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34209,1733113341303-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:24,927 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34209,1733113341303-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:24,930 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2eaa0f11f7e:34209, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:24,941 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T04:22:24,950 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:24,959 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:25,006 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.618sec 2024-12-02T04:22:25,011 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T04:22:25,013 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T04:22:25,013 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T04:22:25,014 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-02T04:22:25,014 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T04:22:25,015 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34209,1733113341303-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T04:22:25,015 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34209,1733113341303-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T04:22:25,041 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T04:22:25,042 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:25,046 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@26a47edd 2024-12-02T04:22:25,048 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T04:22:25,059 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36247, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T04:22:25,062 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7294ef18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:25,067 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T04:22:25,067 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T04:22:25,069 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T04:22:25,073 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:25,076 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:25,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-02T04:22:25,083 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:22:25,084 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:25,085 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-02T04:22:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T04:22:25,097 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:22:25,100 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:25,105 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:25,105 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:25,106 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eeb3bbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:25,106 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:25,109 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:25,113 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:25,115 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:25,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d90b37e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:25,119 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:25,126 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:25,127 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:25,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:25,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:25,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-12-02T04:22:25,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/test.cache.data in system properties and HBase conf 2024-12-02T04:22:25,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T04:22:25,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir in system properties and HBase conf 2024-12-02T04:22:25,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T04:22:25,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741837_1013 (size=349) 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741837_1013 (size=349) 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T04:22:25,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741837_1013 (size=349) 2024-12-02T04:22:25,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/nfs.dump.dir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T04:22:25,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T04:22:25,146 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1cd3dc0c66d2745722bcc18163a3b02e, NAME => 'hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e.', STARTKEY => '', 
ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:25,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741838_1014 (size=36) 2024-12-02T04:22:25,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741838_1014 (size=36) 2024-12-02T04:22:25,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741838_1014 (size=36) 2024-12-02T04:22:25,199 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:25,199 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 1cd3dc0c66d2745722bcc18163a3b02e, disabling compactions & flushes 2024-12-02T04:22:25,199 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,199 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,199 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. after waiting 0 ms 2024-12-02T04:22:25,199 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,199 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,200 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1cd3dc0c66d2745722bcc18163a3b02e: Waiting for close lock at 1733113345199Disabling compacts and flushes for region at 1733113345199Disabling writes for close at 1733113345199Writing region close event to WAL at 1733113345199Closed at 1733113345199 2024-12-02T04:22:25,202 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:22:25,209 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733113345203"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113345203"}]},"ts":"1733113345203"} 2024-12-02T04:22:25,215 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T04:22:25,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T04:22:25,220 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:22:25,223 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113345220"}]},"ts":"1733113345220"} 2024-12-02T04:22:25,228 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-02T04:22:25,229 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:22:25,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:22:25,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:22:25,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:22:25,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:22:25,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:22:25,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:22:25,231 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:22:25,231 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:22:25,231 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:22:25,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:22:25,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=1cd3dc0c66d2745722bcc18163a3b02e, ASSIGN}] 2024-12-02T04:22:25,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741839_1015 (size=592039) 2024-12-02T04:22:25,236 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=1cd3dc0c66d2745722bcc18163a3b02e, ASSIGN 2024-12-02T04:22:25,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741839_1015 (size=592039) 2024-12-02T04:22:25,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741839_1015 (size=592039) 2024-12-02T04:22:25,239 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=1cd3dc0c66d2745722bcc18163a3b02e, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:22:25,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38471 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T04:22:25,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T04:22:25,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T04:22:25,392 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T04:22:25,392 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1cd3dc0c66d2745722bcc18163a3b02e, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:25,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=1cd3dc0c66d2745722bcc18163a3b02e, ASSIGN because future has completed 2024-12-02T04:22:25,398 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1cd3dc0c66d2745722bcc18163a3b02e, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:22:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T04:22:25,679 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T04:22:25,703 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47929, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:22:25,711 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,711 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1cd3dc0c66d2745722bcc18163a3b02e, NAME => 'hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e.', STARTKEY => '', ENDKEY => ''} 2024-12-02T04:22:25,711 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. service=AccessControlService 2024-12-02T04:22:25,711 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:22:25,712 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,712 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:25,712 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,712 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,719 INFO [StoreOpener-1cd3dc0c66d2745722bcc18163a3b02e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,722 INFO [StoreOpener-1cd3dc0c66d2745722bcc18163a3b02e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1cd3dc0c66d2745722bcc18163a3b02e columnFamilyName l 2024-12-02T04:22:25,722 DEBUG [StoreOpener-1cd3dc0c66d2745722bcc18163a3b02e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:25,724 INFO [StoreOpener-1cd3dc0c66d2745722bcc18163a3b02e-1 {}] regionserver.HStore(327): Store=1cd3dc0c66d2745722bcc18163a3b02e/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:25,724 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,726 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,727 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,728 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,728 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T04:22:25,741 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,755 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:22:25,758 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 1cd3dc0c66d2745722bcc18163a3b02e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71867752, jitterRate=0.07091295719146729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:22:25,759 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:22:25,762 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1cd3dc0c66d2745722bcc18163a3b02e: Running coprocessor pre-open hook at 1733113345712Writing region info on filesystem at 1733113345712Initializing all the Stores at 1733113345714 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733113345714Cleaning up temporary data from old regions at 1733113345728 (+14 ms)Running coprocessor post-open hooks at 1733113345759 (+31 ms)Region opened successfully at 1733113345762 (+3 ms) 2024-12-02T04:22:25,765 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., pid=6, masterSystemTime=1733113345678 2024-12-02T04:22:25,771 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1cd3dc0c66d2745722bcc18163a3b02e, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:25,774 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,774 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:22:25,776 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=e2eaa0f11f7e,41643,1733113342088, table=hbase:acl, region=1cd3dc0c66d2745722bcc18163a3b02e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T04:22:25,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1cd3dc0c66d2745722bcc18163a3b02e, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:22:25,813 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T04:22:25,813 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1cd3dc0c66d2745722bcc18163a3b02e, server=e2eaa0f11f7e,41643,1733113342088 in 407 msec 2024-12-02T04:22:25,824 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T04:22:25,824 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=1cd3dc0c66d2745722bcc18163a3b02e, ASSIGN in 580 msec 2024-12-02T04:22:25,825 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:22:25,825 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113345825"}]},"ts":"1733113345825"} 2024-12-02T04:22:25,830 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-02T04:22:25,840 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:22:25,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 767 msec 2024-12-02T04:22:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T04:22:26,250 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-02T04:22:26,258 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T04:22:26,260 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
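[editor's note, illustrative sketch - not part of the captured log] The entries above show the CreateTableProcedure for hbase:acl finishing and the client-side RawAsyncHBaseAdmin reporting the CREATE operation complete. A hedged sketch of how a client could confirm the table is online with the public async admin API follows; the connection setup is assumed and is not code from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AclTableCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Async connection, mirroring the async admin client seen in the log.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      TableName acl = TableName.valueOf("hbase:acl");
      // Confirm the table exists and is enabled before relying on it.
      boolean ready = admin.tableExists(acl).get() && admin.isTableEnabled(acl).get();
      System.out.println("hbase:acl online: " + ready);
    }
  }
}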
2024-12-02T04:22:26,260 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34209,1733113341303-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T04:22:27,229 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:27,258 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:27,532 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:27,533 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T04:22:27,534 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:27,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:27,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:27,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T04:22:27,561 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:27,561 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:27,562 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T04:22:27,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:27,564 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f3d9fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:27,565 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dba46ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:27,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@133e01b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:27,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8c8026f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:27,747 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T04:22:27,747 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-02T04:22:27,748 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T04:22:27,750 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T04:22:27,810 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:28,221 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:28,571 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:28,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c8a4{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-44443-hadoop-yarn-common-3_4_1_jar-_-any-17572046783455783268/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-02T04:22:28,603 INFO [Thread-383 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@2bdc1d38{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-43933-hadoop-yarn-common-3_4_1_jar-_-any-5034875374026552346/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-02T04:22:28,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20feff49{HTTP/1.1, (http/1.1)}{localhost:44443} 2024-12-02T04:22:28,604 INFO [Time-limited test {}] server.Server(415): Started @15216ms 2024-12-02T04:22:28,606 INFO [Thread-383 {}] server.AbstractConnector(333): Started ServerConnector@2a8ec6fa{HTTP/1.1, (http/1.1)}{localhost:43933} 2024-12-02T04:22:28,606 INFO [Thread-383 {}] server.Server(415): Started @15218ms 2024-12-02T04:22:28,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741841_1017 (size=5) 2024-12-02T04:22:28,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741841_1017 (size=5) 2024-12-02T04:22:28,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741841_1017 (size=5) 2024-12-02T04:22:29,553 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-02T04:22:29,557 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:29,592 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T04:22:29,593 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:29,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:29,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:29,617 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T04:22:29,619 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:29,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71c771e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:29,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c076869{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:29,728 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-02T04:22:29,728 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T04:22:29,728 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T04:22:29,728 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T04:22:29,748 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:29,770 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:29,931 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:29,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b23b55a{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-39781-hadoop-yarn-common-3_4_1_jar-_-any-4047890648903442079/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T04:22:29,946 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ede5a0d{HTTP/1.1, (http/1.1)}{localhost:39781} 2024-12-02T04:22:29,946 INFO [Time-limited test {}] server.Server(415): Started @16558ms 2024-12-02T04:22:30,034 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:22:30,167 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-02T04:22:30,170 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T04:22:30,272 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is 
disabled.So is the LogAggregationStatusTracker. 2024-12-02T04:22:30,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:30,296 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T04:22:30,297 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T04:22:30,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T04:22:30,302 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T04:22:30,302 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T04:22:30,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T04:22:30,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@676df96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,AVAILABLE} 2024-12-02T04:22:30,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e501ac3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T04:22:30,357 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-02T04:22:30,357 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T04:22:30,357 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T04:22:30,357 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T04:22:30,366 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:30,374 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T04:22:30,521 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 
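[editor's note, illustrative sketch - not part of the captured log] The Jetty entries above show the YARN web applications coming up as ServletContextHandlers for /logs and /static on ephemeral localhost ports. Assuming a plain embedded Jetty 9.4 setup (the version reported in the log), starting a comparable /logs context typically looks like the sketch below; the served directory is a placeholder, not the test's hadoop.log.dir.

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.DefaultServlet;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class LogsContext {
  public static void main(String[] args) throws Exception {
    Server server = new Server(0); // 0 = ephemeral port, as the test servers use
    ServletContextHandler logs = new ServletContextHandler();
    logs.setContextPath("/logs");
    logs.setResourceBase("/tmp/hadoop-log-dir"); // placeholder for the log directory
    logs.addServlet(DefaultServlet.class, "/");  // serve static files from the base
    server.setHandler(logs);
    server.start();
    System.out.println("Serving " + server.getURI());
    server.stop();
  }
}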
2024-12-02T04:22:30,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e5d7b2d{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/java.io.tmpdir/jetty-localhost-43911-hadoop-yarn-common-3_4_1_jar-_-any-4677240506488764234/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T04:22:30,534 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6882bce1{HTTP/1.1, (http/1.1)}{localhost:43911} 2024-12-02T04:22:30,534 INFO [Time-limited test {}] server.Server(415): Started @17146ms 2024-12-02T04:22:30,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-02T04:22:30,573 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:22:30,610 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=719, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=329, ProcessCount=11, AvailableMemoryMB=7237 2024-12-02T04:22:30,613 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-12-02T04:22:30,619 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T04:22:30,624 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:30,624 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1f7db25a 2024-12-02T04:22:30,624 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T04:22:30,627 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T04:22:30,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:22:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:30,633 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:22:30,635 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-02T04:22:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T04:22:30,638 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:22:30,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741842_1018 (size=458) 2024-12-02T04:22:30,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741842_1018 (size=458) 2024-12-02T04:22:30,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741842_1018 (size=458) 2024-12-02T04:22:30,675 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c23d8888dc0391efc20dff3e1fdfcba4, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:30,675 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => baecf5b14269cd0ea7532736ec6b3431, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:30,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741844_1020 (size=83) 2024-12-02T04:22:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741844_1020 (size=83) 2024-12-02T04:22:30,702 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741843_1019 (size=83) 2024-12-02T04:22:30,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741843_1019 (size=83) 2024-12-02T04:22:30,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741844_1020 (size=83) 2024-12-02T04:22:30,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741843_1019 (size=83) 2024-12-02T04:22:30,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:30,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing baecf5b14269cd0ea7532736ec6b3431, disabling compactions & flushes 2024-12-02T04:22:30,705 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:30,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:30,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. after waiting 0 ms 2024-12-02T04:22:30,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:30,705 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 
2024-12-02T04:22:30,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for baecf5b14269cd0ea7532736ec6b3431: Waiting for close lock at 1733113350705Disabling compacts and flushes for region at 1733113350705Disabling writes for close at 1733113350705Writing region close event to WAL at 1733113350705Closed at 1733113350705 2024-12-02T04:22:30,707 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:30,707 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing c23d8888dc0391efc20dff3e1fdfcba4, disabling compactions & flushes 2024-12-02T04:22:30,707 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:30,707 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:30,707 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. after waiting 0 ms 2024-12-02T04:22:30,707 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:30,708 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 
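[editor's note, illustrative sketch - not part of the captured log] The CreateTableProcedure entries above record a table with a single MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') pre-split at '1', which is why two regions ('' -> '1' and '1' -> '') are created and then closed after the filesystem layout is written. A minimal sketch of creating an equivalent table through the public HBase Admin API is shown below; the connection setup is assumed and this is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobSplitTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' with MOB enabled and a 0-byte threshold, mirroring
      // the descriptor printed in the log.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
          .setColumnFamily(cf)
          .build();
      // One explicit split point at '1' yields the two regions seen above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys);
    }
  }
}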
2024-12-02T04:22:30,708 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for c23d8888dc0391efc20dff3e1fdfcba4: Waiting for close lock at 1733113350707Disabling compacts and flushes for region at 1733113350707Disabling writes for close at 1733113350707Writing region close event to WAL at 1733113350708 (+1 ms)Closed at 1733113350708 2024-12-02T04:22:30,710 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:22:30,711 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733113350710"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113350710"}]},"ts":"1733113350710"} 2024-12-02T04:22:30,711 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733113350710"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113350710"}]},"ts":"1733113350710"} 2024-12-02T04:22:30,748 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:22:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T04:22:30,752 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:22:30,753 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113350752"}]},"ts":"1733113350752"} 2024-12-02T04:22:30,758 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-02T04:22:30,758 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:22:30,761 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:22:30,761 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:22:30,761 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:22:30,761 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:22:30,762 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:22:30,762 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:22:30,762 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:22:30,762 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:22:30,762 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:22:30,762 DEBUG [PEWorker-2 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:22:30,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, ASSIGN}] 2024-12-02T04:22:30,765 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, ASSIGN 2024-12-02T04:22:30,766 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, ASSIGN 2024-12-02T04:22:30,768 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:22:30,768 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:22:30,918 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T04:22:30,919 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c23d8888dc0391efc20dff3e1fdfcba4, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:30,920 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=baecf5b14269cd0ea7532736ec6b3431, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:30,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, ASSIGN because future has completed 2024-12-02T04:22:30,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:22:30,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, ASSIGN because future has completed 2024-12-02T04:22:30,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure baecf5b14269cd0ea7532736ec6b3431, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:22:30,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T04:22:31,089 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:31,089 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => c23d8888dc0391efc20dff3e1fdfcba4, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:22:31,089 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. service=AccessControlService 2024-12-02T04:22:31,089 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:22:31,090 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,090 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:31,090 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,090 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,101 INFO [StoreOpener-c23d8888dc0391efc20dff3e1fdfcba4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,111 INFO [StoreOpener-c23d8888dc0391efc20dff3e1fdfcba4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c23d8888dc0391efc20dff3e1fdfcba4 columnFamilyName cf 2024-12-02T04:22:31,114 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:31,115 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => baecf5b14269cd0ea7532736ec6b3431, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:22:31,115 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. service=AccessControlService 2024-12-02T04:22:31,115 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
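[editor's note, illustrative sketch - not part of the captured log] The entries above show the AccessController system coprocessor being loaded as each region opens and its AccessControlService endpoint being registered. A minimal configuration sketch for enabling that coprocessor follows; the property keys are the standard HBase ones, and this is illustrative rather than the configuration actually used by this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControlConf {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization and register the AccessController on master,
    // regions, and region servers; region opens then register the
    // AccessControlService endpoint, as reflected in the CoprocessorHost entries.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
  }
}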
2024-12-02T04:22:31,116 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,116 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:31,116 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,116 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,118 DEBUG [StoreOpener-c23d8888dc0391efc20dff3e1fdfcba4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:31,119 INFO [StoreOpener-c23d8888dc0391efc20dff3e1fdfcba4-1 {}] regionserver.HStore(327): Store=c23d8888dc0391efc20dff3e1fdfcba4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:31,120 INFO [StoreOpener-baecf5b14269cd0ea7532736ec6b3431-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,120 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,122 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,123 INFO [StoreOpener-baecf5b14269cd0ea7532736ec6b3431-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region baecf5b14269cd0ea7532736ec6b3431 columnFamilyName cf 2024-12-02T04:22:31,123 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,124 DEBUG [StoreOpener-baecf5b14269cd0ea7532736ec6b3431-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:31,128 INFO [StoreOpener-baecf5b14269cd0ea7532736ec6b3431-1 {}] regionserver.HStore(327): Store=baecf5b14269cd0ea7532736ec6b3431/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:31,128 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,130 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,131 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,132 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,132 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,132 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,132 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,135 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,137 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,144 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:22:31,144 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/recovered.edits/1.seqid, 
newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:22:31,145 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened baecf5b14269cd0ea7532736ec6b3431; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60298689, jitterRate=-0.10147951543331146}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:22:31,145 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,145 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened c23d8888dc0391efc20dff3e1fdfcba4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61200846, jitterRate=-0.08803632855415344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:22:31,145 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,146 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for baecf5b14269cd0ea7532736ec6b3431: Running coprocessor pre-open hook at 1733113351116Writing region info on filesystem at 1733113351116Initializing all the Stores at 1733113351118 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113351118Cleaning up temporary data from old regions at 1733113351132 (+14 ms)Running coprocessor post-open hooks at 1733113351145 (+13 ms)Region opened successfully at 1733113351146 (+1 ms) 2024-12-02T04:22:31,146 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for c23d8888dc0391efc20dff3e1fdfcba4: Running coprocessor pre-open hook at 1733113351090Writing region info on filesystem at 1733113351090Initializing all the Stores at 1733113351092 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113351092Cleaning up temporary data from old regions at 1733113351132 (+40 ms)Running coprocessor post-open hooks at 1733113351145 (+13 ms)Region opened successfully at 1733113351146 (+1 ms) 2024-12-02T04:22:31,148 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431., pid=11, masterSystemTime=1733113351087 2024-12-02T04:22:31,148 INFO 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4., pid=10, masterSystemTime=1733113351081 2024-12-02T04:22:31,152 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:31,153 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:31,153 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c23d8888dc0391efc20dff3e1fdfcba4, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:22:31,154 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:31,154 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:31,157 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=baecf5b14269cd0ea7532736ec6b3431, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:22:31,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:22:31,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure baecf5b14269cd0ea7532736ec6b3431, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:22:31,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-12-02T04:22:31,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4, server=e2eaa0f11f7e,41643,1733113342088 in 241 msec 2024-12-02T04:22:31,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, ASSIGN in 413 msec 2024-12-02T04:22:31,178 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-12-02T04:22:31,178 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure baecf5b14269cd0ea7532736ec6b3431, server=e2eaa0f11f7e,34291,1733113342258 in 243 msec 2024-12-02T04:22:31,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=9, resume processing ppid=7 2024-12-02T04:22:31,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, ASSIGN in 416 msec 2024-12-02T04:22:31,191 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:22:31,192 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113351191"}]},"ts":"1733113351191"} 2024-12-02T04:22:31,197 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-02T04:22:31,203 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:22:31,208 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-02T04:22:31,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:22:31,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,223 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55093, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:31,230 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:31,230 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:31,231 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,233 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46549, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-02T04:22:31,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:22:31,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-02T04:22:31,241 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T04:22:31,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:31,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:31,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:22:31,341 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:31,347 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:31,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithSplitRegion in 712 msec 2024-12-02T04:22:31,354 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:31,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:31,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:22:31,677 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-02T04:22:31,678 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T04:22:31,678 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T04:22:31,691 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:22:31,691 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-02T04:22:31,691 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T04:22:31,691 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T04:22:31,693 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:31,693 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T04:22:31,694 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-02T04:22:31,694 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-02T04:22:31,696 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:22:31,696 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-02T04:22:31,696 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T04:22:31,696 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-02T04:22:31,697 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T04:22:31,697 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T04:22:31,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T04:22:31,778 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:22:31,781 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:31,788 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:31,789 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 
2024-12-02T04:22:31,790 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:22:31,792 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:31,810 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:31,816 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,818 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56418, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:31,823 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:31,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T04:22:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113351834 (current time:1733113351834). 
2024-12-02T04:22:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:22:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T04:22:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:22:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f4c58fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:31,837 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:31,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:31,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:31,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a6d5fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:31,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:31,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:31,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:31,839 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:31,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a82e4f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:31,842 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:31,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,844 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:31,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:22:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:22:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:31,852 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:22:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ebeb1ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:31,855 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:31,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:31,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:31,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d5a4fa3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:31,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:31,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:31,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:31,857 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35362, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:31,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b225ede, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:31,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:31,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,861 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37248, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T04:22:31,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:22:31,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:31,867 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:31,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:22:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:22:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:31,870 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:22:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T04:22:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:22:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T04:22:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-02T04:22:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T04:22:31,883 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:22:31,889 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:22:31,902 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:22:31,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741845_1021 (size=215) 2024-12-02T04:22:31,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741845_1021 (size=215) 2024-12-02T04:22:31,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741845_1021 (size=215) 2024-12-02T04:22:31,915 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:22:31,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431}] 2024-12-02T04:22:31,921 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:31,921 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:31,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T04:22:32,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-02T04:22:32,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-02T04:22:32,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:32,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:32,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for baecf5b14269cd0ea7532736ec6b3431: 2024-12-02T04:22:32,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for c23d8888dc0391efc20dff3e1fdfcba4: 2024-12-02T04:22:32,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T04:22:32,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-02T04:22:32,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:22:32,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:22:32,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:22:32,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:22:32,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741847_1023 (size=86) 2024-12-02T04:22:32,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741847_1023 (size=86) 2024-12-02T04:22:32,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741847_1023 (size=86) 2024-12-02T04:22:32,109 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 
2024-12-02T04:22:32,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-02T04:22:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-02T04:22:32,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,114 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431 in 200 msec 2024-12-02T04:22:32,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741846_1022 (size=86) 2024-12-02T04:22:32,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741846_1022 (size=86) 2024-12-02T04:22:32,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741846_1022 (size=86) 2024-12-02T04:22:32,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 
2024-12-02T04:22:32,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-02T04:22:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-02T04:22:32,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,125 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-02T04:22:32,131 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:22:32,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4 in 209 msec 2024-12-02T04:22:32,134 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:22:32,137 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:22:32,137 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:22:32,138 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:32,139 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:22:32,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741848_1024 (size=78) 2024-12-02T04:22:32,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741848_1024 (size=78) 2024-12-02T04:22:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741848_1024 (size=78) 2024-12-02T04:22:32,150 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:22:32,150 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,153 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741849_1025 (size=713) 2024-12-02T04:22:32,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741849_1025 (size=713) 2024-12-02T04:22:32,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741849_1025 (size=713) 2024-12-02T04:22:32,177 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:22:32,188 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:22:32,189 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,192 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:22:32,192 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-02T04:22:32,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 315 msec 2024-12-02T04:22:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T04:22:32,198 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:22:32,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:22:32,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:22:32,220 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:32,225 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,225 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 
2024-12-02T04:22:32,225 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:22:32,227 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:32,236 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:32,248 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:32,254 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T04:22:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113352254 (current time:1733113352254). 2024-12-02T04:22:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:22:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T04:22:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:22:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@462f7d70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:32,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:32,257 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:32,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:32,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:32,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61c531d0, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:32,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:32,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:32,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:32,259 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:32,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1549c3c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:32,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:32,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:32,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:32,263 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44458, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:32,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:22:32,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:22:32,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:32,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:32,265 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:22:32,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ab3829, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:32,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:32,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:32,267 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:32,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:32,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:32,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ee2b596, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:32,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:32,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:32,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:32,269 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:32,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57b9c616, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:32,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:32,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:32,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:32,273 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44470, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:32,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:22:32,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:32,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47842, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:32,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:22:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-02T04:22:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T04:22:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T04:22:32,281 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-02T04:22:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA]
2024-12-02T04:22:32,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-12-02T04:22:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T04:22:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-02T04:22:32,287 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:22:32,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T04:22:32,290 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:22:32,299 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:22:32,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741850_1026 (size=210) 2024-12-02T04:22:32,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741850_1026 (size=210) 2024-12-02T04:22:32,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741850_1026 (size=210) 2024-12-02T04:22:32,341 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:22:32,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431}] 2024-12-02T04:22:32,344 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,345 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T04:22:32,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-02T04:22:32,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-02T04:22:32,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:32,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:32,502 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing baecf5b14269cd0ea7532736ec6b3431 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-02T04:22:32,502 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing c23d8888dc0391efc20dff3e1fdfcba4 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-02T04:22:32,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4 is 71, key is 03a198e6bdb7ea515f7bafde67db61b6/cf:q/1733113352212/Put/seqid=0 2024-12-02T04:22:32,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431 is 71, key is 1962b89ae87bc21dccb96e649a333384/cf:q/1733113352216/Put/seqid=0 2024-12-02T04:22:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T04:22:32,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741852_1028 (size=8242) 2024-12-02T04:22:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741852_1028 (size=8242) 2024-12-02T04:22:32,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-02T04:22:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741851_1027 (size=5032) 2024-12-02T04:22:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741852_1028 (size=8242) 2024-12-02T04:22:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741851_1027 (size=5032) 2024-12-02T04:22:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741851_1027 (size=5032) 2024-12-02T04:22:32,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:32,741 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,741 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/.tmp/cf/9795175e957b41fcb2f3ff4445e1fce3, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=c23d8888dc0391efc20dff3e1fdfcba4] 2024-12-02T04:22:32,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/.tmp/cf/4192bbbf157a4cbbbca367ef1f90feaf, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=baecf5b14269cd0ea7532736ec6b3431] 2024-12-02T04:22:32,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/.tmp/cf/9795175e957b41fcb2f3ff4445e1fce3 is 224, key is 0bf16834dfa156018abfa20f5ab758291/cf:q/1733113352212/Put/seqid=0 2024-12-02T04:22:32,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/.tmp/cf/4192bbbf157a4cbbbca367ef1f90feaf is 224, key is 1086c31c683458e2eb8d16a450f5638a1/cf:q/1733113352216/Put/seqid=0 2024-12-02T04:22:32,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741853_1029 (size=15939) 2024-12-02T04:22:32,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741853_1029 (size=15939) 2024-12-02T04:22:32,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741853_1029 (size=15939) 2024-12-02T04:22:32,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741854_1030 (size=5754) 2024-12-02T04:22:32,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741854_1030 (size=5754) 2024-12-02T04:22:32,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741854_1030 (size=5754) 2024-12-02T04:22:32,776 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/.tmp/cf/4192bbbf157a4cbbbca367ef1f90feaf 2024-12-02T04:22:32,776 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/.tmp/cf/9795175e957b41fcb2f3ff4445e1fce3 2024-12-02T04:22:32,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/.tmp/cf/4192bbbf157a4cbbbca367ef1f90feaf as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf/4192bbbf157a4cbbbca367ef1f90feaf 2024-12-02T04:22:32,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/.tmp/cf/9795175e957b41fcb2f3ff4445e1fce3 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf/9795175e957b41fcb2f3ff4445e1fce3 2024-12-02T04:22:32,798 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf/9795175e957b41fcb2f3ff4445e1fce3, entries=2, sequenceid=6, filesize=5.6 K 2024-12-02T04:22:32,801 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf/4192bbbf157a4cbbbca367ef1f90feaf, entries=48, sequenceid=6, filesize=15.6 K 2024-12-02T04:22:32,806 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for c23d8888dc0391efc20dff3e1fdfcba4 in 303ms, sequenceid=6, compaction requested=false 2024-12-02T04:22:32,806 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for baecf5b14269cd0ea7532736ec6b3431 in 303ms, sequenceid=6, compaction requested=false 2024-12-02T04:22:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-02T04:22:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-02T04:22:32,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for c23d8888dc0391efc20dff3e1fdfcba4: 2024-12-02T04:22:32,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for baecf5b14269cd0ea7532736ec6b3431: 2024-12-02T04:22:32,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T04:22:32,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 
for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T04:22:32,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:22:32,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:22:32,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf/4192bbbf157a4cbbbca367ef1f90feaf] hfiles 2024-12-02T04:22:32,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf/9795175e957b41fcb2f3ff4445e1fce3] hfiles 2024-12-02T04:22:32,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf/9795175e957b41fcb2f3ff4445e1fce3 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf/4192bbbf157a4cbbbca367ef1f90feaf for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741856_1032 (size=125) 2024-12-02T04:22:32,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741855_1031 (size=125) 2024-12-02T04:22:32,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741856_1032 (size=125) 2024-12-02T04:22:32,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to 
blk_1073741856_1032 (size=125) 2024-12-02T04:22:32,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:22:32,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-02T04:22:32,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741855_1031 (size=125) 2024-12-02T04:22:32,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741855_1031 (size=125) 2024-12-02T04:22:32,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-02T04:22:32,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,876 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:22:32,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-02T04:22:32,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-02T04:22:32,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,884 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4 in 536 msec 2024-12-02T04:22:32,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-12-02T04:22:32,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure baecf5b14269cd0ea7532736ec6b3431 in 543 msec 2024-12-02T04:22:32,889 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:22:32,890 
INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:22:32,891 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T04:22:32,891 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:22:32,892 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:32,893 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4] hfiles 2024-12-02T04:22:32,894 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:22:32,894 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:22:32,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741857_1033 (size=309) 2024-12-02T04:22:32,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741857_1033 (size=309) 2024-12-02T04:22:32,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741857_1033 (size=309) 2024-12-02T04:22:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T04:22:32,921 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:22:32,921 DEBUG [PEWorker-1 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,923 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741858_1034 (size=1023) 2024-12-02T04:22:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741858_1034 (size=1023) 2024-12-02T04:22:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741858_1034 (size=1023) 2024-12-02T04:22:32,967 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:22:32,987 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:22:32,988 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:32,990 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:22:32,990 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-02T04:22:32,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 708 msec 2024-12-02T04:22:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T04:22:33,431 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:22:33,475 DEBUG [Time-limited test {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T04:22:33,479 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T04:22:33,484 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:22:33,486 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T04:22:33,492 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T04:22:33,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:22:33,502 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T04:22:33,508 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:22:33,509 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T04:22:33,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:22:33,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:33,519 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:22:33,519 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:33,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-02T04:22:33,523 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:22:33,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T04:22:33,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741859_1035 (size=390) 2024-12-02T04:22:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741859_1035 (size=390) 2024-12-02T04:22:33,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T04:22:33,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741859_1035 (size=390) 2024-12-02T04:22:33,637 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 15127f40b26897f83013d0d54e4adbdf, NAME => 'testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:33,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741860_1036 (size=75) 2024-12-02T04:22:33,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741860_1036 (size=75) 2024-12-02T04:22:33,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741860_1036 (size=75) 2024-12-02T04:22:33,658 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:33,658 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 15127f40b26897f83013d0d54e4adbdf, disabling compactions & flushes 2024-12-02T04:22:33,658 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:33,658 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:33,658 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 
after waiting 0 ms 2024-12-02T04:22:33,659 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:33,659 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:33,659 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 15127f40b26897f83013d0d54e4adbdf: Waiting for close lock at 1733113353658Disabling compacts and flushes for region at 1733113353658Disabling writes for close at 1733113353658Writing region close event to WAL at 1733113353659 (+1 ms)Closed at 1733113353659 2024-12-02T04:22:33,660 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:22:33,661 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733113353661"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113353661"}]},"ts":"1733113353661"} 2024-12-02T04:22:33,664 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T04:22:33,667 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:22:33,668 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113353668"}]},"ts":"1733113353668"} 2024-12-02T04:22:33,673 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-02T04:22:33,673 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:22:33,675 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:22:33,675 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:22:33,676 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:22:33,676 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:22:33,676 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:22:33,676 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:22:33,676 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:22:33,676 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:22:33,676 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:22:33,676 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:22:33,676 
INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, ASSIGN}] 2024-12-02T04:22:33,679 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, ASSIGN 2024-12-02T04:22:33,683 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:22:33,834 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T04:22:33,834 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=15127f40b26897f83013d0d54e4adbdf, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T04:22:33,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, ASSIGN because future has completed 2024-12-02T04:22:33,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15127f40b26897f83013d0d54e4adbdf, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:22:33,995 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T04:22:33,998 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41247, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:22:34,006 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:34,006 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 15127f40b26897f83013d0d54e4adbdf, NAME => 'testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.', STARTKEY => '', ENDKEY => ''} 2024-12-02T04:22:34,007 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 
service=AccessControlService 2024-12-02T04:22:34,007 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:22:34,007 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,007 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:34,007 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,007 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,010 INFO [StoreOpener-15127f40b26897f83013d0d54e4adbdf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,014 INFO [StoreOpener-15127f40b26897f83013d0d54e4adbdf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15127f40b26897f83013d0d54e4adbdf columnFamilyName cf 2024-12-02T04:22:34,014 DEBUG [StoreOpener-15127f40b26897f83013d0d54e4adbdf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:34,015 INFO [StoreOpener-15127f40b26897f83013d0d54e4adbdf-1 {}] regionserver.HStore(327): Store=15127f40b26897f83013d0d54e4adbdf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:34,015 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,017 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,018 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,019 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,019 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,024 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,027 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:22:34,029 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 15127f40b26897f83013d0d54e4adbdf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65247038, jitterRate=-0.027743369340896606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:22:34,029 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:34,031 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 15127f40b26897f83013d0d54e4adbdf: Running coprocessor pre-open hook at 1733113354007Writing region info on filesystem at 1733113354007Initializing all the Stores at 1733113354010 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113354010Cleaning up temporary data from old regions at 1733113354020 (+10 ms)Running coprocessor post-open hooks at 1733113354029 (+9 ms)Region opened successfully at 1733113354030 (+1 ms) 2024-12-02T04:22:34,033 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., pid=20, masterSystemTime=1733113353994 2024-12-02T04:22:34,037 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:34,038 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=15127f40b26897f83013d0d54e4adbdf, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:34,041 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:34,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15127f40b26897f83013d0d54e4adbdf, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:22:34,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-02T04:22:34,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 15127f40b26897f83013d0d54e4adbdf, server=e2eaa0f11f7e,37413,1733113342217 in 216 msec 2024-12-02T04:22:34,068 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-02T04:22:34,068 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, ASSIGN in 385 msec 2024-12-02T04:22:34,071 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:22:34,071 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113354071"}]},"ts":"1733113354071"} 2024-12-02T04:22:34,075 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-02T04:22:34,077 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:22:34,078 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-02T04:22:34,085 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T04:22:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T04:22:34,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:34,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:34,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:34,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:22:34,169 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,172 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 652 msec 2024-12-02T04:22:34,176 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,177 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:22:34,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 
2024-12-02T04:22:34,659 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:22:34,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:34,662 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T04:22:36,594 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:22:36,845 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-02T04:22:37,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741861_1037 (size=134217728) 2024-12-02T04:22:37,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741861_1037 (size=134217728) 2024-12-02T04:22:37,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741861_1037 (size=134217728) 2024-12-02T04:22:39,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741862_1038 (size=134217728) 2024-12-02T04:22:39,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741862_1038 (size=134217728) 2024-12-02T04:22:39,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741862_1038 (size=134217728) 2024-12-02T04:22:39,914 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733113354666/Put/seqid=0 2024-12-02T04:22:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741863_1039 (size=51979256) 2024-12-02T04:22:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741863_1039 (size=51979256) 2024-12-02T04:22:39,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741863_1039 (size=51979256) 2024-12-02T04:22:39,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@289cb5b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:39,943 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:39,943 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:39,952 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:39,953 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:39,953 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:39,953 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@382c665a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:39,953 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:39,954 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:39,954 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:39,956 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:39,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3679b5e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:39,959 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:39,961 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:39,961 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:39,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:40,005 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:34539/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
2024-12-02T04:22:40,006 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T04:22:40,008 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:22:40,008 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@724ca99c 2024-12-02T04:22:40,008 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T04:22:40,011 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T04:22:40,019 WARN [IPC Server handler 2 on default port 34539 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-02T04:22:40,027 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2] 2024-12-02T04:22:40,030 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:40,043 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:40,049 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:40,096 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34539/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-02T04:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:40,140 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45773, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-02T04:22:40,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at 
org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T04:22:40,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:45773 deadline: 1733113420140, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-02T04:22:40,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T04:22:40,180 WARN [IPC Server handler 2 on default port 34539 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-02T04:22:40,215 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34539/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/output/cf/test_file for inclusion in 15127f40b26897f83013d0d54e4adbdf/cf 2024-12-02T04:22:40,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-02T04:22:40,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T04:22:40,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:34539/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-02T04:22:40,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(2603): Flush status journal for 15127f40b26897f83013d0d54e4adbdf: 2024-12-02T04:22:40,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:34539/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/output/cf/test_file to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/staging/jenkins__testExportFileSystemStateWithSplitRegion__q0agpe2bf4vhgfp7stsls5t2nq2atvj83plqaiieo37r9he503qdhsj0glpg7rkk/cf/test_file 2024-12-02T04:22:40,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/staging/jenkins__testExportFileSystemStateWithSplitRegion__q0agpe2bf4vhgfp7stsls5t2nq2atvj83plqaiieo37r9he503qdhsj0glpg7rkk/cf/test_file as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ 2024-12-02T04:22:40,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/staging/jenkins__testExportFileSystemStateWithSplitRegion__q0agpe2bf4vhgfp7stsls5t2nq2atvj83plqaiieo37r9he503qdhsj0glpg7rkk/cf/test_file into 15127f40b26897f83013d0d54e4adbdf/cf as 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ - updating store file list. 2024-12-02T04:22:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T04:22:40,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ into 15127f40b26897f83013d0d54e4adbdf/cf 2024-12-02T04:22:40,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/staging/jenkins__testExportFileSystemStateWithSplitRegion__q0agpe2bf4vhgfp7stsls5t2nq2atvj83plqaiieo37r9he503qdhsj0glpg7rkk/cf/test_file into 15127f40b26897f83013d0d54e4adbdf/cf (new location: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_) 2024-12-02T04:22:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/staging/jenkins__testExportFileSystemStateWithSplitRegion__q0agpe2bf4vhgfp7stsls5t2nq2atvj83plqaiieo37r9he503qdhsj0glpg7rkk/cf/test_file 2024-12-02T04:22:40,330 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T04:22:40,330 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:22:40,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:40,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:40,330 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:22:40,331 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:40,332 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=e2eaa0f11f7e:37413 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-02T04:22:40,333 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-02T04:22:40,333 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2 from cache 2024-12-02T04:22:40,333 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-02T04:22:40,348 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2] 2024-12-02T04:22:40,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 
2024-12-02T04:22:40,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=15127f40b26897f83013d0d54e4adbdf, daughterA=ff229ab504813d939397799e3f1620d8, daughterB=dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:40,375 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=15127f40b26897f83013d0d54e4adbdf, daughterA=ff229ab504813d939397799e3f1620d8, daughterB=dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:40,375 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=15127f40b26897f83013d0d54e4adbdf, daughterA=ff229ab504813d939397799e3f1620d8, daughterB=dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:40,375 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=15127f40b26897f83013d0d54e4adbdf, daughterA=ff229ab504813d939397799e3f1620d8, daughterB=dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T04:22:40,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, UNASSIGN}] 2024-12-02T04:22:40,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, UNASSIGN 2024-12-02T04:22:40,403 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=15127f40b26897f83013d0d54e4adbdf, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:40,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, UNASSIGN because future has completed 2024-12-02T04:22:40,407 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T04:22:40,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 15127f40b26897f83013d0d54e4adbdf, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:22:40,439 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e2eaa0f11f7e:34291 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-12-02T04:22:40,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T04:22:40,627 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:40,627 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T04:22:40,628 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 15127f40b26897f83013d0d54e4adbdf, disabling compactions & flushes 2024-12-02T04:22:40,628 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:40,628 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:40,628 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. after waiting 0 ms 2024-12-02T04:22:40,628 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 
2024-12-02T04:22:40,658 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-02T04:22:40,663 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:22:40,663 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf. 2024-12-02T04:22:40,663 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 15127f40b26897f83013d0d54e4adbdf: Waiting for close lock at 1733113360628Running coprocessor pre-close hooks at 1733113360628Disabling compacts and flushes for region at 1733113360628Disabling writes for close at 1733113360628Writing region close event to WAL at 1733113360635 (+7 ms)Running coprocessor post-close hooks at 1733113360659 (+24 ms)Closed at 1733113360663 (+4 ms) 2024-12-02T04:22:40,669 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=15127f40b26897f83013d0d54e4adbdf, regionState=CLOSED 2024-12-02T04:22:40,676 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:40,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 15127f40b26897f83013d0d54e4adbdf, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:22:40,687 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-02T04:22:40,689 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 15127f40b26897f83013d0d54e4adbdf, server=e2eaa0f11f7e,37413,1733113342217 in 274 msec 2024-12-02T04:22:40,691 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-02T04:22:40,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=15127f40b26897f83013d0d54e4adbdf, UNASSIGN in 298 msec 2024-12-02T04:22:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T04:22:40,712 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:40,719 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=15127f40b26897f83013d0d54e4adbdf, threads=1 2024-12-02T04:22:40,724 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ for region: 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:40,743 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T04:22:40,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741864_1040 (size=21) 2024-12-02T04:22:40,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741864_1040 (size=21) 2024-12-02T04:22:40,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741864_1040 (size=21) 2024-12-02T04:22:40,824 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T04:22:40,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741865_1041 (size=21) 2024-12-02T04:22:40,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741865_1041 (size=21) 2024-12-02T04:22:40,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741865_1041 (size=21) 2024-12-02T04:22:40,924 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ for region: 15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:22:40,927 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 15127f40b26897f83013d0d54e4adbdf Daughter A: [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf] storefiles, Daughter B: [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf] storefiles. 
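The pid=21 records around this point trace the master-side SplitTableRegionProcedure: the parent region 15127f40b26897f83013d0d54e4adbdf is closed, its single store file is split into reference files for the two daughters, and the daughters are then assigned. As a hedged, illustrative sketch only (not the test source; it assumes a reachable cluster and uses split key "5", which matches the daughter boundaries logged further down), the client-side call that triggers such a procedure looks roughly like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to split the table at row "5"; the master runs a
      // SplitTableRegionProcedure (close parent, split store files, open daughters).
      admin.split(table, Bytes.toBytes("5"));
      // The request returns before the split finishes; poll until both daughters are
      // online, much like the repeated "Checking to see if procedure is done pid=21" calls.
      while (admin.getRegions(table).size() < 2) {
        Thread.sleep(200);
      }
    }
  }
}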
2024-12-02T04:22:40,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741866_1042 (size=76) 2024-12-02T04:22:40,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741866_1042 (size=76) 2024-12-02T04:22:40,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741866_1042 (size=76) 2024-12-02T04:22:40,981 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:41,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T04:22:41,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741867_1043 (size=76) 2024-12-02T04:22:41,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741867_1043 (size=76) 2024-12-02T04:22:41,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741867_1043 (size=76) 2024-12-02T04:22:41,014 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:41,034 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-02T04:22:41,046 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-02T04:22:41,051 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733113361050"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733113361050"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733113361050"}]},"ts":"1733113361050"} 2024-12-02T04:22:41,051 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733113361050"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113361050"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733113361050"}]},"ts":"1733113361050"} 2024-12-02T04:22:41,053 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733113361050"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113361050"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733113361050"}]},"ts":"1733113361050"} 2024-12-02T04:22:41,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, ASSIGN}] 2024-12-02T04:22:41,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, ASSIGN 2024-12-02T04:22:41,075 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, ASSIGN 2024-12-02T04:22:41,077 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, ASSIGN; state=SPLITTING_NEW, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:22:41,077 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, ASSIGN; state=SPLITTING_NEW, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:22:41,228 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T04:22:41,229 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=ff229ab504813d939397799e3f1620d8, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:41,229 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=dc631b2f75a4e7bf5ab5ea2acc4850f5, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:41,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, ASSIGN because future has completed 2024-12-02T04:22:41,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff229ab504813d939397799e3f1620d8, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:22:41,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, ASSIGN because future has completed 2024-12-02T04:22:41,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:22:41,390 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:22:41,390 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => ff229ab504813d939397799e3f1620d8, NAME => 'testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.', STARTKEY => '', ENDKEY => '5'} 2024-12-02T04:22:41,390 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. service=AccessControlService 2024-12-02T04:22:41,390 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
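The records above show the daughters being assigned (pid=24, pid=25) and opened (pid=26, pid=27), with ff229ab504813d939397799e3f1620d8 covering STARTKEY '' to ENDKEY '5'. Purely as an illustrative follow-up, not part of this log, the resulting boundaries could be inspected with a helper like the one below, reusing an open Admin as in the earlier sketch:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionBoundariesSketch {
  // Print the encoded name and [startKey, endKey) of every region of the table.
  static void printRegions(Admin admin) throws IOException {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    for (RegionInfo ri : admin.getRegions(table)) {
      System.out.println(ri.getEncodedName() + " ["
          + Bytes.toStringBinary(ri.getStartKey()) + ", "
          + Bytes.toStringBinary(ri.getEndKey()) + ")");
    }
  }
}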
2024-12-02T04:22:41,391 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,391 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:41,391 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,391 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,392 INFO [StoreOpener-ff229ab504813d939397799e3f1620d8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,394 INFO [StoreOpener-ff229ab504813d939397799e3f1620d8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff229ab504813d939397799e3f1620d8 columnFamilyName cf 2024-12-02T04:22:41,394 DEBUG [StoreOpener-ff229ab504813d939397799e3f1620d8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:41,405 DEBUG [StoreFileOpener-ff229ab504813d939397799e3f1620d8-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf: NONE, but ROW specified in column family configuration 2024-12-02T04:22:41,423 DEBUG [StoreOpener-ff229ab504813d939397799e3f1620d8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_-bottom 2024-12-02T04:22:41,424 INFO [StoreOpener-ff229ab504813d939397799e3f1620d8-1 {}] regionserver.HStore(327): Store=ff229ab504813d939397799e3f1620d8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:41,424 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,426 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,428 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,429 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,429 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,431 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,432 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened ff229ab504813d939397799e3f1620d8; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72903912, jitterRate=0.08635294437408447}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:22:41,432 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,433 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for ff229ab504813d939397799e3f1620d8: Running coprocessor pre-open hook at 1733113361391Writing region info on filesystem at 1733113361391Initializing all the Stores at 1733113361392 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113361392Cleaning up temporary data from old regions at 1733113361429 (+37 ms)Running coprocessor post-open hooks at 1733113361432 (+3 ms)Region opened successfully at 1733113361433 (+1 ms) 2024-12-02T04:22:41,434 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8., pid=26, masterSystemTime=1733113361385 2024-12-02T04:22:41,435 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.,because compaction is disabled. 2024-12-02T04:22:41,437 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:22:41,437 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:22:41,437 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:22:41,438 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => dc631b2f75a4e7bf5ab5ea2acc4850f5, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.', STARTKEY => '5', ENDKEY => ''} 2024-12-02T04:22:41,438 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=ff229ab504813d939397799e3f1620d8, regionState=OPEN, openSeqNum=7, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:41,438 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. service=AccessControlService 2024-12-02T04:22:41,438 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
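The recurring "HFile Bloom filter type ... NONE, but ROW specified in column family configuration" messages above and below mean the store file being opened carries no Bloom filter (it appears to be a bulk-loaded file, going by its _SeqId_4_ suffix) while the column family requests ROW Blooms. A hypothetical column-family definition with exactly that setting, shown only to make the message concrete and mirroring the BLOOMFILTER => 'ROW', VERSIONS => '1' attributes in the open journal above:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDefSketch {
  static TableDescriptor descriptor() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setMaxVersions(1)                 // VERSIONS => '1'
            .build())
        .build();
  }
}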
2024-12-02T04:22:41,438 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,439 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:22:41,439 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,439 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,441 INFO [StoreOpener-dc631b2f75a4e7bf5ab5ea2acc4850f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff229ab504813d939397799e3f1620d8, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:22:41,442 INFO [StoreOpener-dc631b2f75a4e7bf5ab5ea2acc4850f5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc631b2f75a4e7bf5ab5ea2acc4850f5 columnFamilyName cf 2024-12-02T04:22:41,443 DEBUG [StoreOpener-dc631b2f75a4e7bf5ab5ea2acc4850f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:41,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-02T04:22:41,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure ff229ab504813d939397799e3f1620d8, server=e2eaa0f11f7e,37413,1733113342217 in 211 msec 2024-12-02T04:22:41,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, ASSIGN in 374 msec 2024-12-02T04:22:41,457 DEBUG [StoreFileOpener-dc631b2f75a4e7bf5ab5ea2acc4850f5-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf: NONE, but ROW specified in column family configuration 2024-12-02T04:22:41,459 DEBUG [StoreOpener-dc631b2f75a4e7bf5ab5ea2acc4850f5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_-top 2024-12-02T04:22:41,460 INFO [StoreOpener-dc631b2f75a4e7bf5ab5ea2acc4850f5-1 {}] regionserver.HStore(327): Store=dc631b2f75a4e7bf5ab5ea2acc4850f5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:22:41,460 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,461 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,462 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,463 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,463 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,465 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,466 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened dc631b2f75a4e7bf5ab5ea2acc4850f5; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61888011, jitterRate=-0.07779677212238312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:22:41,466 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,466 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for dc631b2f75a4e7bf5ab5ea2acc4850f5: Running coprocessor pre-open hook at 1733113361439Writing region info on filesystem at 1733113361439Initializing all the Stores at 1733113361440 (+1 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113361440Cleaning up temporary data from old regions at 1733113361463 (+23 ms)Running coprocessor post-open hooks at 1733113361466 (+3 ms)Region opened successfully at 1733113361466 2024-12-02T04:22:41,467 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5., pid=27, masterSystemTime=1733113361385 2024-12-02T04:22:41,467 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.,because compaction is disabled. 2024-12-02T04:22:41,469 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:22:41,469 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:22:41,470 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=dc631b2f75a4e7bf5ab5ea2acc4850f5, regionState=OPEN, openSeqNum=7, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:22:41,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:22:41,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-02T04:22:41,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5, server=e2eaa0f11f7e,37413,1733113342217 in 241 msec 2024-12-02T04:22:41,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-02T04:22:41,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, ASSIGN in 404 msec 2024-12-02T04:22:41,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=15127f40b26897f83013d0d54e4adbdf, daughterA=ff229ab504813d939397799e3f1620d8, daughterB=dc631b2f75a4e7bf5ab5ea2acc4850f5 in 1.1120 sec 2024-12-02T04:22:41,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T04:22:41,518 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:22:41,518 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T04:22:41,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T04:22:41,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113361523 (current time:1733113361523). 2024-12-02T04:22:41,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:22:41,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T04:22:41,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:22:41,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54bd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:41,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:41,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:41,525 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:41,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:41,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:41,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ae1073a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:41,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:41,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:41,527 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:41,527 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:41,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@505f5f67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:41,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:41,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:41,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:41,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:41,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:22:41,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:22:41,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:41,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:41,532 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:22:41,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7956d7ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:22:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:22:41,533 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:22:41,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:22:41,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:22:41,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28134687, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:41,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:22:41,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:22:41,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:41,535 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:22:41,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4db83629, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:22:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:22:41,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:22:41,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:41,538 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
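The snapshot request logged at 04:22:41,523 ({ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }) and the ACL checks that follow are driven by a client-side Admin call; the master then registers a SnapshotProcedure (pid=28 below) with one SnapshotRegionProcedure per online region. A minimal sketch of that call pattern, illustrative rather than taken from the test source:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Take a snapshot of the enabled table (flush-type by default for enabled tables);
      // the call blocks until the master-side snapshot procedure completes.
      admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
    }
  }
}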
2024-12-02T04:22:41,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:22:41,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:22:41,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:22:41,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:22:41,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:22:41,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:41,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:22:41,543 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:22:41,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T04:22:41,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:22:41,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T04:22:41,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-02T04:22:41,547 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:22:41,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T04:22:41,548 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:22:41,552 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:22:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741868_1044 (size=197) 2024-12-02T04:22:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741868_1044 (size=197) 2024-12-02T04:22:41,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741868_1044 (size=197) 2024-12-02T04:22:41,562 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:22:41,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff229ab504813d939397799e3f1620d8}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5}] 2024-12-02T04:22:41,563 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,563 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T04:22:41,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T04:22:41,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-02T04:22:41,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-02T04:22:41,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:22:41,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for dc631b2f75a4e7bf5ab5ea2acc4850f5: 2024-12-02T04:22:41,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:22:41,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
2024-12-02T04:22:41,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for ff229ab504813d939397799e3f1620d8: 2024-12-02T04:22:41,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T04:22:41,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:22:41,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_-top] hfiles 2024-12-02T04:22:41,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:22:41,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_-bottom] hfiles 2024-12-02T04:22:41,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741869_1045 (size=182) 2024-12-02T04:22:41,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741870_1046 (size=182) 2024-12-02T04:22:41,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741870_1046 (size=182) 2024-12-02T04:22:41,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741869_1045 (size=182) 2024-12-02T04:22:41,731 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 
2024-12-02T04:22:41,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741869_1045 (size=182) 2024-12-02T04:22:41,731 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-02T04:22:41,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-02T04:22:41,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741870_1046 (size=182) 2024-12-02T04:22:41,733 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff229ab504813d939397799e3f1620d8 2024-12-02T04:22:41,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:22:41,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-02T04:22:41,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-02T04:22:41,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,734 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:22:41,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff229ab504813d939397799e3f1620d8 in 173 msec 2024-12-02T04:22:41,738 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-02T04:22:41,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5 in 173 msec 2024-12-02T04:22:41,739 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:22:41,740 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
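The periodic "Checking to see if procedure is done pid=28" calls from MasterRpcServices, seen before and after this point, are the client polling the master for snapshot completion. A hedged sketch of the asynchronous client-side equivalent follows; the wrapper class and sleep interval are illustrative, and the SnapshotDescription construction assumes the (name, table, type) constructor of the current client API.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

// Sketch only: fire the snapshot asynchronously and poll for completion;
// each isSnapshotFinished() call shows up on the master as a
// "Checking to see if procedure is done" line like the ones in this log.
public class SnapshotPollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      SnapshotDescription desc = new SnapshotDescription(
          "snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
          SnapshotType.FLUSH);
      admin.snapshotAsync(desc);
      while (!admin.isSnapshotFinished(desc)) {
        Thread.sleep(200); // back off between completion checks
      }
    }
  }
}
```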
2024-12-02T04:22:41,740 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:22:41,740 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:22:41,741 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_] hfiles 2024-12-02T04:22:41,741 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ 2024-12-02T04:22:41,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741871_1047 (size=129) 2024-12-02T04:22:41,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741871_1047 (size=129) 2024-12-02T04:22:41,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741871_1047 (size=129) 2024-12-02T04:22:41,752 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 15127f40b26897f83013d0d54e4adbdf, NAME => 'testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,753 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:22:41,754 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:22:41,754 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,755 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741872_1048 (size=891) 2024-12-02T04:22:41,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741872_1048 (size=891) 2024-12-02T04:22:41,773 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741872_1048 (size=891) 2024-12-02T04:22:41,776 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:22:41,786 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:22:41,787 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,790 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:22:41,790 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-02T04:22:41,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 246 msec 2024-12-02T04:22:41,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T04:22:41,868 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:22:41,868 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868 2024-12-02T04:22:41,869 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:41,901 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, 
inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:22:41,901 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,904 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:22:41,910 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:22:41,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741874_1050 (size=197) 2024-12-02T04:22:41,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741874_1050 (size=197) 2024-12-02T04:22:41,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741874_1050 (size=197) 2024-12-02T04:22:41,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741873_1049 (size=891) 2024-12-02T04:22:41,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741873_1049 (size=891) 2024-12-02T04:22:41,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741873_1049 (size=891) 2024-12-02T04:22:41,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:41,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:41,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-10522971463181770977.jar 2024-12-02T04:22:42,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-10256956178043911579.jar 2024-12-02T04:22:42,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:22:42,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:22:42,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:22:42,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:22:42,934 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:22:42,934 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:22:42,934 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:22:42,934 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:22:42,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:22:42,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:22:42,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:22:42,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:22:42,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:22:42,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:22:42,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:22:42,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
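The long run of "For class X, using jar Y" DEBUG lines above and immediately below comes from TableMapReduceUtil resolving which jars have to ship with the export MapReduce job. A minimal sketch of that wiring is shown here; the job name is illustrative and the call is the standard dependency-jar helper, not the test's exact code path.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

// Sketch: locate the jar that contains each required class and add it to the
// job's classpath/distributed cache, which is what produces the
// "For class ..., using jar ..." lines in this log.
public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-deps-sketch");
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```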
2024-12-02T04:22:42,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:22:42,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:22:42,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:22:43,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741875_1051 (size=131440) 2024-12-02T04:22:43,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741875_1051 (size=131440) 2024-12-02T04:22:43,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741875_1051 (size=131440) 2024-12-02T04:22:43,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741876_1052 (size=4188619) 2024-12-02T04:22:43,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741876_1052 (size=4188619) 2024-12-02T04:22:43,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741876_1052 (size=4188619) 2024-12-02T04:22:43,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741877_1053 (size=1323991) 2024-12-02T04:22:43,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741877_1053 (size=1323991) 2024-12-02T04:22:43,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741877_1053 (size=1323991) 2024-12-02T04:22:43,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741878_1054 (size=903927) 2024-12-02T04:22:43,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741878_1054 (size=903927) 2024-12-02T04:22:43,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741878_1054 (size=903927) 2024-12-02T04:22:43,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741879_1055 (size=8360083) 2024-12-02T04:22:43,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741879_1055 (size=8360083) 2024-12-02T04:22:43,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38471 is added to blk_1073741879_1055 (size=8360083) 2024-12-02T04:22:43,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741880_1056 (size=6424732) 2024-12-02T04:22:43,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741880_1056 (size=6424732) 2024-12-02T04:22:43,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741880_1056 (size=6424732) 2024-12-02T04:22:43,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741881_1057 (size=1877034) 2024-12-02T04:22:43,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741881_1057 (size=1877034) 2024-12-02T04:22:43,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741881_1057 (size=1877034) 2024-12-02T04:22:43,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741882_1058 (size=77835) 2024-12-02T04:22:43,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741882_1058 (size=77835) 2024-12-02T04:22:43,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741882_1058 (size=77835) 2024-12-02T04:22:43,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741883_1059 (size=30949) 2024-12-02T04:22:43,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741883_1059 (size=30949) 2024-12-02T04:22:43,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741883_1059 (size=30949) 2024-12-02T04:22:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741884_1060 (size=440957) 2024-12-02T04:22:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741884_1060 (size=440957) 2024-12-02T04:22:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741884_1060 (size=440957) 2024-12-02T04:22:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741885_1061 (size=1597213) 2024-12-02T04:22:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741885_1061 (size=1597213) 2024-12-02T04:22:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741885_1061 (size=1597213) 2024-12-02T04:22:43,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741886_1062 (size=4695811) 2024-12-02T04:22:43,249 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741886_1062 (size=4695811) 2024-12-02T04:22:43,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741886_1062 (size=4695811) 2024-12-02T04:22:43,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741887_1063 (size=232957) 2024-12-02T04:22:43,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741887_1063 (size=232957) 2024-12-02T04:22:43,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741887_1063 (size=232957) 2024-12-02T04:22:43,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741888_1064 (size=127628) 2024-12-02T04:22:43,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741888_1064 (size=127628) 2024-12-02T04:22:43,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741888_1064 (size=127628) 2024-12-02T04:22:43,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741889_1065 (size=20406) 2024-12-02T04:22:43,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741889_1065 (size=20406) 2024-12-02T04:22:43,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741889_1065 (size=20406) 2024-12-02T04:22:43,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741890_1066 (size=5175431) 2024-12-02T04:22:43,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741890_1066 (size=5175431) 2024-12-02T04:22:43,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741890_1066 (size=5175431) 2024-12-02T04:22:43,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741891_1067 (size=217634) 2024-12-02T04:22:43,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741891_1067 (size=217634) 2024-12-02T04:22:43,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741891_1067 (size=217634) 2024-12-02T04:22:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741892_1068 (size=1832290) 2024-12-02T04:22:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741892_1068 (size=1832290) 2024-12-02T04:22:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741892_1068 (size=1832290) 2024-12-02T04:22:43,339 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741893_1069 (size=322274) 2024-12-02T04:22:43,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741893_1069 (size=322274) 2024-12-02T04:22:43,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741893_1069 (size=322274) 2024-12-02T04:22:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741894_1070 (size=503880) 2024-12-02T04:22:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741894_1070 (size=503880) 2024-12-02T04:22:43,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741894_1070 (size=503880) 2024-12-02T04:22:43,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741895_1071 (size=29229) 2024-12-02T04:22:43,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741895_1071 (size=29229) 2024-12-02T04:22:43,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741895_1071 (size=29229) 2024-12-02T04:22:43,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741896_1072 (size=24096) 2024-12-02T04:22:43,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741896_1072 (size=24096) 2024-12-02T04:22:43,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741896_1072 (size=24096) 2024-12-02T04:22:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741897_1073 (size=111872) 2024-12-02T04:22:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741897_1073 (size=111872) 2024-12-02T04:22:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741897_1073 (size=111872) 2024-12-02T04:22:43,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741898_1074 (size=45609) 2024-12-02T04:22:43,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741898_1074 (size=45609) 2024-12-02T04:22:43,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741898_1074 (size=45609) 2024-12-02T04:22:43,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741899_1075 (size=136454) 2024-12-02T04:22:43,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741899_1075 (size=136454) 2024-12-02T04:22:43,810 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741899_1075 (size=136454) 2024-12-02T04:22:43,811 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T04:22:43,817 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-02T04:22:43,824 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=15127f40b26897f83013d0d54e4adbdf-07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_. 2024-12-02T04:22:43,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=15127f40b26897f83013d0d54e4adbdf-07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_. 2024-12-02T04:22:43,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-02T04:22:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741900_1076 (size=244) 2024-12-02T04:22:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741900_1076 (size=244) 2024-12-02T04:22:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741900_1076 (size=244) 2024-12-02T04:22:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741901_1077 (size=17) 2024-12-02T04:22:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741901_1077 (size=17) 2024-12-02T04:22:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741901_1077 (size=17) 2024-12-02T04:22:44,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741902_1078 (size=304054) 2024-12-02T04:22:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741902_1078 (size=304054) 2024-12-02T04:22:44,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741902_1078 (size=304054) 2024-12-02T04:22:44,426 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:22:44,426 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:22:44,644 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0001_000001 (auth:SIMPLE) from 127.0.0.1:50100 2024-12-02T04:22:46,167 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:22:50,561 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:22:51,100 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0001_000001 (auth:SIMPLE) from 127.0.0.1:59896 2024-12-02T04:22:51,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741903_1079 (size=349752) 2024-12-02T04:22:51,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741903_1079 (size=349752) 2024-12-02T04:22:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741903_1079 (size=349752) 2024-12-02T04:22:53,548 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0001_000001 (auth:SIMPLE) from 127.0.0.1:48614 2024-12-02T04:23:07,040 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T04:23:07,040 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T04:23:16,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c23d8888dc0391efc20dff3e1fdfcba4, had cached 0 bytes from a total of 5754 2024-12-02T04:23:16,116 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region baecf5b14269cd0ea7532736ec6b3431, had cached 0 bytes from a total of 15939 2024-12-02T04:23:20,561 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
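The copy itself runs as a MapReduce job (application_1733113348655_0001 above). Outside the test harness, an equivalent export is normally launched with the ExportSnapshot tool; the sketch below assumes ExportSnapshot is runnable as a Hadoop Tool via ToolRunner (the wrapper class is illustrative, while the snapshot name and destination path are taken from this log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Sketch: export the snapshot to another HDFS location, roughly equivalent to
//   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
//     -snapshot snapshot-testExportFileSystemStateWithSplitRegion \
//     -copy-to <destination HDFS directory>
public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
        "-copy-to", "hdfs://localhost:34539/user/jenkins/test-data/"
            + "0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868"
    });
    System.exit(rc);
  }
}
```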
2024-12-02T04:23:25,205 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region baecf5b14269cd0ea7532736ec6b3431 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:23:25,205 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1cd3dc0c66d2745722bcc18163a3b02e changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:23:25,207 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c23d8888dc0391efc20dff3e1fdfcba4 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:23:26,391 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ff229ab504813d939397799e3f1620d8, had cached 0 bytes from a total of 320414712 2024-12-02T04:23:26,439 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region dc631b2f75a4e7bf5ab5ea2acc4850f5, had cached 0 bytes from a total of 320414712 2024-12-02T04:23:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741904_1080 (size=134217728) 2024-12-02T04:23:29,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741904_1080 (size=134217728) 2024-12-02T04:23:29,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741904_1080 (size=134217728) 2024-12-02T04:23:50,562 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T04:23:59,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741905_1081 (size=134217728) 2024-12-02T04:23:59,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741905_1081 (size=134217728) 2024-12-02T04:23:59,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741905_1081 (size=134217728) 2024-12-02T04:24:01,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c23d8888dc0391efc20dff3e1fdfcba4, had cached 0 bytes from a total of 5754 2024-12-02T04:24:01,116 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region baecf5b14269cd0ea7532736ec6b3431, had cached 0 bytes from a total of 15939 2024-12-02T04:24:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741906_1082 (size=51979256) 2024-12-02T04:24:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741906_1082 (size=51979256) 2024-12-02T04:24:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741906_1082 (size=51979256) 2024-12-02T04:24:11,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741907_1083 (size=17520) 2024-12-02T04:24:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741907_1083 (size=17520) 2024-12-02T04:24:11,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741907_1083 (size=17520) 2024-12-02T04:24:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741908_1084 (size=482) 2024-12-02T04:24:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741908_1084 (size=482) 2024-12-02T04:24:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741908_1084 (size=482) 2024-12-02T04:24:11,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741909_1085 (size=17520) 2024-12-02T04:24:11,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741909_1085 (size=17520) 2024-12-02T04:24:11,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741909_1085 (size=17520) 2024-12-02T04:24:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741910_1086 (size=349752) 2024-12-02T04:24:11,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741910_1086 (size=349752) 2024-12-02T04:24:11,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38471 is added to blk_1073741910_1086 (size=349752) 2024-12-02T04:24:11,268 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0001/container_1733113348655_0001_01_000002/launch_container.sh] 2024-12-02T04:24:11,268 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0001/container_1733113348655_0001_01_000002/container_tokens] 2024-12-02T04:24:11,268 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0001/container_1733113348655_0001_01_000002/sysfs] 2024-12-02T04:24:11,270 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0001_000001 (auth:SIMPLE) from 127.0.0.1:37310 2024-12-02T04:24:11,391 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ff229ab504813d939397799e3f1620d8, had cached 0 bytes from a total of 320414712 2024-12-02T04:24:11,439 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region dc631b2f75a4e7bf5ab5ea2acc4850f5, had cached 0 bytes from a total of 320414712 2024-12-02T04:24:12,999 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:24:13,001 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
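For scale, the three HDFS blocks written during the copy above (blk_1073741904_1080 through blk_1073741906_1082) appear to be the exported store file, and their sizes add up to the region size reported by the metrics wrapper and to the single split ExportSnapshot planned before the job started:

    134,217,728 B + 134,217,728 B + 51,979,256 B = 320,414,712 B ≈ 305.6 MiB  (matching "export split=0 size=305.6 M")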
2024-12-02T04:24:13,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,012 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:24:13,012 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:24:13,013 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,013 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-02T04:24:13,013 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-02T04:24:13,013 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,014 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-02T04:24:13,014 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113361868/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-02T04:24:13,026 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T04:24:13,034 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113453033"}]},"ts":"1733113453033"} 2024-12-02T04:24:13,036 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
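After verifying the export, the test disables the source table (the "Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion" request above); the master services it with the DisableTableProcedure chain that follows below. From client code this is a single Admin call; as in the earlier examples, the wrapper class and connection setup are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: disable the source table once the export has been verified.
// The master stores a DisableTableProcedure (pid=31 above), which schedules
// CloseTableRegionsProcedure / TransitRegionStateProcedure children to
// unassign and close each region, as the following log lines show.
public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
    }
  }
}
```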
2024-12-02T04:24:13,036 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-02T04:24:13,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-02T04:24:13,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, UNASSIGN}] 2024-12-02T04:24:13,044 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, UNASSIGN 2024-12-02T04:24:13,044 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, UNASSIGN 2024-12-02T04:24:13,045 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=dc631b2f75a4e7bf5ab5ea2acc4850f5, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:13,045 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=ff229ab504813d939397799e3f1620d8, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:13,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, UNASSIGN because future has completed 2024-12-02T04:24:13,047 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:13,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:24:13,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, UNASSIGN because future has completed 2024-12-02T04:24:13,049 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:13,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff229ab504813d939397799e3f1620d8, server=e2eaa0f11f7e,37413,1733113342217}] 
2024-12-02T04:24:13,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T04:24:13,201 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close ff229ab504813d939397799e3f1620d8 2024-12-02T04:24:13,201 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:13,201 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing ff229ab504813d939397799e3f1620d8, disabling compactions & flushes 2024-12-02T04:24:13,201 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:24:13,201 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:24:13,201 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. after waiting 0 ms 2024-12-02T04:24:13,201 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 2024-12-02T04:24:13,207 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-02T04:24:13,208 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:13,208 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8. 
2024-12-02T04:24:13,208 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for ff229ab504813d939397799e3f1620d8: Waiting for close lock at 1733113453201Running coprocessor pre-close hooks at 1733113453201Disabling compacts and flushes for region at 1733113453201Disabling writes for close at 1733113453201Writing region close event to WAL at 1733113453202 (+1 ms)Running coprocessor post-close hooks at 1733113453207 (+5 ms)Closed at 1733113453208 (+1 ms) 2024-12-02T04:24:13,210 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed ff229ab504813d939397799e3f1620d8 2024-12-02T04:24:13,210 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:24:13,210 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:13,210 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing dc631b2f75a4e7bf5ab5ea2acc4850f5, disabling compactions & flushes 2024-12-02T04:24:13,211 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:24:13,211 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:24:13,211 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. after waiting 0 ms 2024-12-02T04:24:13,211 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 2024-12-02T04:24:13,211 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=ff229ab504813d939397799e3f1620d8, regionState=CLOSED 2024-12-02T04:24:13,216 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-02T04:24:13,216 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:13,217 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5. 
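As the WALSplitUtil lines above show, each region writes a <maxSeqId>.seqid marker under its recovered.edits directory when it closes. The sketch below lists that directory with the plain Hadoop FileSystem API; the region path is copied from the log and the listing is purely illustrative, since in this run the region directory is archived and deleted moments later.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRecoveredEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region directory copied from the WALSplitUtil output above.
    Path regionDir = new Path("hdfs://localhost:34539/user/jenkins/test-data/"
        + "0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/"
        + "testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8");
    try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:34539"), conf)) {
      for (FileStatus st : fs.listStatus(new Path(regionDir, "recovered.edits"))) {
        // Expect a marker such as 10.seqid recording the max sequence id at close time.
        System.out.println(st.getPath() + " len=" + st.getLen());
      }
    }
  }
}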
2024-12-02T04:24:13,217 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for dc631b2f75a4e7bf5ab5ea2acc4850f5: Waiting for close lock at 1733113453210Running coprocessor pre-close hooks at 1733113453210Disabling compacts and flushes for region at 1733113453210Disabling writes for close at 1733113453211 (+1 ms)Writing region close event to WAL at 1733113453211Running coprocessor post-close hooks at 1733113453216 (+5 ms)Closed at 1733113453217 (+1 ms) 2024-12-02T04:24:13,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff229ab504813d939397799e3f1620d8, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:24:13,219 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:24:13,219 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=dc631b2f75a4e7bf5ab5ea2acc4850f5, regionState=CLOSED 2024-12-02T04:24:13,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:24:13,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-12-02T04:24:13,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure ff229ab504813d939397799e3f1620d8, server=e2eaa0f11f7e,37413,1733113342217 in 171 msec 2024-12-02T04:24:13,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ff229ab504813d939397799e3f1620d8, UNASSIGN in 181 msec 2024-12-02T04:24:13,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-02T04:24:13,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure dc631b2f75a4e7bf5ab5ea2acc4850f5, server=e2eaa0f11f7e,37413,1733113342217 in 175 msec 2024-12-02T04:24:13,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-02T04:24:13,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=dc631b2f75a4e7bf5ab5ea2acc4850f5, UNASSIGN in 186 msec 2024-12-02T04:24:13,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-02T04:24:13,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 195 msec 2024-12-02T04:24:13,235 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113453235"}]},"ts":"1733113453235"} 2024-12-02T04:24:13,237 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-02T04:24:13,237 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-02T04:24:13,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 210 msec 2024-12-02T04:24:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T04:24:13,348 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:24:13,351 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,355 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,357 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,360 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,366 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:24:13,366 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8 2024-12-02T04:24:13,366 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:24:13,369 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf, FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/recovered.edits] 2024-12-02T04:24:13,369 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/recovered.edits] 2024-12-02T04:24:13,369 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/recovered.edits] 2024-12-02T04:24:13,375 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:24:13,375 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_.15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:24:13,376 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/cf/07f01abed2f1469c9d248e0936c0a3cb_SeqId_4_ 2024-12-02T04:24:13,379 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/recovered.edits/10.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8/recovered.edits/10.seqid 2024-12-02T04:24:13,379 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/recovered.edits/6.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf/recovered.edits/6.seqid 2024-12-02T04:24:13,379 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/recovered.edits/10.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5/recovered.edits/10.seqid 2024-12-02T04:24:13,380 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/ff229ab504813d939397799e3f1620d8 2024-12-02T04:24:13,380 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/15127f40b26897f83013d0d54e4adbdf 2024-12-02T04:24:13,380 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportFileSystemStateWithSplitRegion/dc631b2f75a4e7bf5ab5ea2acc4850f5 2024-12-02T04:24:13,380 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-02T04:24:13,382 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-02T04:24:13,392 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-02T04:24:13,395 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-02T04:24:13,397 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,397 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
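Before removing the rows, DeleteTableProcedure reads hbase:meta for the table's remaining region rows; the KeyOnlyFilter cache population above comes from that kind of keys-only read, and the "Deleting some vestigial 3 rows" warning reports what was found. A hedged sketch of an equivalent client-side prefix scan follows; the row-key comment reflects the usual hbase:meta key layout, and anything outside the standard HBase client API is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowsForTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Region rows in hbase:meta are keyed "<table>,<startkey>,<timestamp>.<encoded>.",
      // so a prefix scan on "<table>," finds them; KeyOnlyFilter keeps just the keys.
      Scan scan = new Scan()
          .setRowPrefixFilter(Bytes.toBytes("testExportFileSystemStateWithSplitRegion,"))
          .setFilter(new KeyOnlyFilter());
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toStringBinary(r.getRow()));
        }
      }
    }
  }
}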
2024-12-02T04:24:13,397 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113453397"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,398 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113453397"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,398 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113453397"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,401 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-02T04:24:13,401 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 15127f40b26897f83013d0d54e4adbdf, NAME => 'testExportFileSystemStateWithSplitRegion,,1733113353513.15127f40b26897f83013d0d54e4adbdf.', STARTKEY => '', ENDKEY => ''}, {ENCODED => ff229ab504813d939397799e3f1620d8, NAME => 'testExportFileSystemStateWithSplitRegion,,1733113360367.ff229ab504813d939397799e3f1620d8.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => dc631b2f75a4e7bf5ab5ea2acc4850f5, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733113360367.dc631b2f75a4e7bf5ab5ea2acc4850f5.', STARTKEY => '5', ENDKEY => ''}] 2024-12-02T04:24:13,401 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
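The DeleteTableProcedure above (pid=37) corresponds to a client calling deleteTable on an already-disabled table; the master then archives the region directories, removes the rows from hbase:meta and drops the table descriptor and ACLs, as the surrounding log shows. A minimal sketch, with the table name from the log and everything else illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
      // A table must be disabled before it can be deleted.
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
      System.out.println("exists: " + admin.tableExists(table));
    }
  }
}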
2024-12-02T04:24:13,401 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113453401"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,404 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-02T04:24:13,405 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 54 msec 2024-12-02T04:24:13,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,427 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T04:24:13,427 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T04:24:13,427 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh 
because writable data is empty 2024-12-02T04:24:13,427 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T04:24:13,427 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T04:24:13,427 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T04:24:13,427 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T04:24:13,427 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T04:24:13,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:13,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:13,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:13,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-02T04:24:13,429 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,430 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:24:13,430 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T04:24:13,434 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113453433"}]},"ts":"1733113453433"} 2024-12-02T04:24:13,435 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-02T04:24:13,435 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-02T04:24:13,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-02T04:24:13,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, UNASSIGN}] 2024-12-02T04:24:13,438 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, UNASSIGN 2024-12-02T04:24:13,438 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, UNASSIGN 2024-12-02T04:24:13,439 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=c23d8888dc0391efc20dff3e1fdfcba4, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:13,439 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=baecf5b14269cd0ea7532736ec6b3431, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:13,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, UNASSIGN because future has completed 2024-12-02T04:24:13,441 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:13,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:24:13,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, UNASSIGN because future has completed 
2024-12-02T04:24:13,442 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:13,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure baecf5b14269cd0ea7532736ec6b3431, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:24:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T04:24:13,594 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:24:13,595 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:13,595 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing c23d8888dc0391efc20dff3e1fdfcba4, disabling compactions & flushes 2024-12-02T04:24:13,595 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:24:13,595 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:24:13,595 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. after waiting 0 ms 2024-12-02T04:24:13,595 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:24:13,595 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:24:13,596 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:13,596 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing baecf5b14269cd0ea7532736ec6b3431, disabling compactions & flushes 2024-12-02T04:24:13,596 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:24:13,596 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 
2024-12-02T04:24:13,596 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. after waiting 0 ms 2024-12-02T04:24:13,596 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 2024-12-02T04:24:13,600 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:24:13,601 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:24:13,601 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:13,601 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:13,601 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4. 2024-12-02T04:24:13,601 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431. 
2024-12-02T04:24:13,601 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for c23d8888dc0391efc20dff3e1fdfcba4: Waiting for close lock at 1733113453595Running coprocessor pre-close hooks at 1733113453595Disabling compacts and flushes for region at 1733113453595Disabling writes for close at 1733113453595Writing region close event to WAL at 1733113453596 (+1 ms)Running coprocessor post-close hooks at 1733113453601 (+5 ms)Closed at 1733113453601 2024-12-02T04:24:13,601 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for baecf5b14269cd0ea7532736ec6b3431: Waiting for close lock at 1733113453596Running coprocessor pre-close hooks at 1733113453596Disabling compacts and flushes for region at 1733113453596Disabling writes for close at 1733113453596Writing region close event to WAL at 1733113453597 (+1 ms)Running coprocessor post-close hooks at 1733113453601 (+4 ms)Closed at 1733113453601 2024-12-02T04:24:13,603 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:24:13,604 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=c23d8888dc0391efc20dff3e1fdfcba4, regionState=CLOSED 2024-12-02T04:24:13,604 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:24:13,605 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=baecf5b14269cd0ea7532736ec6b3431, regionState=CLOSED 2024-12-02T04:24:13,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:24:13,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure baecf5b14269cd0ea7532736ec6b3431, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:24:13,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-02T04:24:13,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure c23d8888dc0391efc20dff3e1fdfcba4, server=e2eaa0f11f7e,41643,1733113342088 in 167 msec 2024-12-02T04:24:13,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-12-02T04:24:13,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=c23d8888dc0391efc20dff3e1fdfcba4, UNASSIGN in 173 msec 2024-12-02T04:24:13,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure baecf5b14269cd0ea7532736ec6b3431, server=e2eaa0f11f7e,34291,1733113342258 in 167 msec 2024-12-02T04:24:13,613 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=41, resume processing ppid=39 2024-12-02T04:24:13,613 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=baecf5b14269cd0ea7532736ec6b3431, UNASSIGN in 174 msec 2024-12-02T04:24:13,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-02T04:24:13,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 178 msec 2024-12-02T04:24:13,617 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113453617"}]},"ts":"1733113453617"} 2024-12-02T04:24:13,619 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-02T04:24:13,619 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-02T04:24:13,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 190 msec 2024-12-02T04:24:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T04:24:13,748 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:24:13,749 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,751 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,751 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,755 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,756 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:24:13,756 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:24:13,759 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/recovered.edits] 2024-12-02T04:24:13,760 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/recovered.edits] 2024-12-02T04:24:13,764 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf/9795175e957b41fcb2f3ff4445e1fce3 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/cf/9795175e957b41fcb2f3ff4445e1fce3 2024-12-02T04:24:13,765 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf/4192bbbf157a4cbbbca367ef1f90feaf to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/cf/4192bbbf157a4cbbbca367ef1f90feaf 2024-12-02T04:24:13,767 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4/recovered.edits/9.seqid 2024-12-02T04:24:13,768 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:24:13,768 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431/recovered.edits/9.seqid 2024-12-02T04:24:13,768 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSplitRegion/baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:24:13,768 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-02T04:24:13,769 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-02T04:24:13,770 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-12-02T04:24:13,773 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b2024120226db01fdcf844c36be67d5f5cf4b55c8_baecf5b14269cd0ea7532736ec6b3431 2024-12-02T04:24:13,774 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202dc6b1aea21b44015aab276197bb6b1b0_c23d8888dc0391efc20dff3e1fdfcba4 2024-12-02T04:24:13,774 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-02T04:24:13,777 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,779 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-02T04:24:13,782 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-02T04:24:13,784 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,784 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-02T04:24:13,784 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113453784"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,784 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113453784"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,786 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:24:13,787 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c23d8888dc0391efc20dff3e1fdfcba4, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733113350628.c23d8888dc0391efc20dff3e1fdfcba4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => baecf5b14269cd0ea7532736ec6b3431, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733113350628.baecf5b14269cd0ea7532736ec6b3431.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:24:13,787 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
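After both tables are gone, the test deletes its snapshots; the MasterRpcServices "delete name: ..." requests logged below show the three names. A minimal Admin-side sketch of that cleanup, with the snapshot names taken from those requests and the rest illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Snapshot names copied from the delete requests logged below.
      for (String name : new String[] {
          "emptySnaptb0-testExportFileSystemStateWithSplitRegion",
          "snapshot-testExportFileSystemStateWithSplitRegion",
          "snaptb0-testExportFileSystemStateWithSplitRegion" }) {
        admin.deleteSnapshot(name);
      }
    }
  }
}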
2024-12-02T04:24:13,787 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113453787"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:13,789 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-02T04:24:13,791 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 42 msec 2024-12-02T04:24:13,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,802 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T04:24:13,802 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T04:24:13,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T04:24:13,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-02T04:24:13,812 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,812 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T04:24:13,827 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T04:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,832 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T04:24:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,836 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T04:24:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:13,918 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=764 (was 719) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:54306 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44649 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1405 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_488547829_1 at /127.0.0.1:51984 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:39186 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:44649 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:48620 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_488547829_1 at /127.0.0.1:39164 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 120530) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 782) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=418 (was 329) - SystemLoadAverage LEAK? -, ProcessCount=22 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=368 (was 7237) 2024-12-02T04:24:13,918 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=764 is superior to 500 2024-12-02T04:24:13,933 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=764, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=418, ProcessCount=22, AvailableMemoryMB=367 2024-12-02T04:24:13,934 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=764 is superior to 500 2024-12-02T04:24:13,935 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:24:13,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:13,937 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:24:13,937 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-02T04:24:13,938 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T04:24:13,938 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:24:13,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741911_1087 (size=442) 2024-12-02T04:24:13,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741911_1087 (size=442) 2024-12-02T04:24:13,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741911_1087 (size=442) 2024-12-02T04:24:13,949 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e560e5bb386017b6375814c96ac2089a, NAME => 'testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:13,950 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 29c4bfb474cb7d4f861ae74e0e76f8fb, NAME => 'testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:13,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741912_1088 (size=67) 2024-12-02T04:24:13,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741912_1088 (size=67) 2024-12-02T04:24:13,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741912_1088 (size=67) 2024-12-02T04:24:13,959 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:13,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 29c4bfb474cb7d4f861ae74e0e76f8fb, disabling compactions & flushes 2024-12-02T04:24:13,960 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:13,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:13,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. after waiting 0 ms 2024-12-02T04:24:13,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:13,960 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:13,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 29c4bfb474cb7d4f861ae74e0e76f8fb: Waiting for close lock at 1733113453959Disabling compacts and flushes for region at 1733113453959Disabling writes for close at 1733113453960 (+1 ms)Writing region close event to WAL at 1733113453960Closed at 1733113453960 2024-12-02T04:24:13,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741913_1089 (size=67) 2024-12-02T04:24:13,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741913_1089 (size=67) 2024-12-02T04:24:13,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741913_1089 (size=67) 2024-12-02T04:24:13,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:13,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing e560e5bb386017b6375814c96ac2089a, disabling compactions & flushes 2024-12-02T04:24:13,966 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:13,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:13,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 
after waiting 0 ms 2024-12-02T04:24:13,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:13,966 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:13,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for e560e5bb386017b6375814c96ac2089a: Waiting for close lock at 1733113453966Disabling compacts and flushes for region at 1733113453966Disabling writes for close at 1733113453966Writing region close event to WAL at 1733113453966Closed at 1733113453966 2024-12-02T04:24:13,967 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:24:13,968 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733113453967"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113453967"}]},"ts":"1733113453967"} 2024-12-02T04:24:13,968 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733113453967"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113453967"}]},"ts":"1733113453967"} 2024-12-02T04:24:13,971 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
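
The table descriptor logged above ('testtb-testExportWithTargetName' with a single MOB-enabled family 'cf': IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and two regions split at row key '1') is produced by the test harness itself. As a hedged illustration only, the sketch below shows how an equivalent table could be created through the standard HBase client API; the class name and the direct use of ConnectionFactory/Admin are assumptions for the example, not the code path the test actually runs.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family 'cf' mirroring the descriptor in the log:
          // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build();
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setColumnFamily(cf)
              .build();
          // One split key ('1') yields the two regions seen above:
          // ['', '1') handled by one region server and ['1', '') by another.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(table, splitKeys);
        }
      }
    }

A create issued this way goes through the same CreateTableProcedure states that the log records (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, and so on), assuming a reachable cluster in the client configuration.
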
2024-12-02T04:24:13,972 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:24:13,972 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113453972"}]},"ts":"1733113453972"} 2024-12-02T04:24:13,974 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-02T04:24:13,974 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:24:13,976 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:24:13,976 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:24:13,976 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:24:13,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:24:13,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, ASSIGN}] 2024-12-02T04:24:13,978 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, ASSIGN 2024-12-02T04:24:13,978 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, ASSIGN 2024-12-02T04:24:13,979 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:24:13,979 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:24:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T04:24:14,129 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T04:24:14,130 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=e560e5bb386017b6375814c96ac2089a, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:14,130 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=29c4bfb474cb7d4f861ae74e0e76f8fb, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:14,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, ASSIGN because future has completed 2024-12-02T04:24:14,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure e560e5bb386017b6375814c96ac2089a, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:24:14,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, ASSIGN because future has completed 2024-12-02T04:24:14,133 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:24:14,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T04:24:14,288 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:14,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => e560e5bb386017b6375814c96ac2089a, NAME => 'testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:24:14,289 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:14,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 
service=AccessControlService 2024-12-02T04:24:14,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 29c4bfb474cb7d4f861ae74e0e76f8fb, NAME => 'testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:24:14,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. service=AccessControlService 2024-12-02T04:24:14,289 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:14,290 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,290 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,297 INFO [StoreOpener-29c4bfb474cb7d4f861ae74e0e76f8fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 
29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,297 INFO [StoreOpener-e560e5bb386017b6375814c96ac2089a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,299 INFO [StoreOpener-29c4bfb474cb7d4f861ae74e0e76f8fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29c4bfb474cb7d4f861ae74e0e76f8fb columnFamilyName cf 2024-12-02T04:24:14,299 INFO [StoreOpener-e560e5bb386017b6375814c96ac2089a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e560e5bb386017b6375814c96ac2089a columnFamilyName cf 2024-12-02T04:24:14,300 DEBUG [StoreOpener-e560e5bb386017b6375814c96ac2089a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:14,300 DEBUG [StoreOpener-29c4bfb474cb7d4f861ae74e0e76f8fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:14,301 INFO [StoreOpener-e560e5bb386017b6375814c96ac2089a-1 {}] regionserver.HStore(327): Store=e560e5bb386017b6375814c96ac2089a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:14,301 INFO [StoreOpener-29c4bfb474cb7d4f861ae74e0e76f8fb-1 {}] regionserver.HStore(327): Store=29c4bfb474cb7d4f861ae74e0e76f8fb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:14,301 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,301 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,302 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,302 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,302 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,303 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,303 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,303 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,303 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,303 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,304 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,306 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,307 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:14,307 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened e560e5bb386017b6375814c96ac2089a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60626824, jitterRate=-0.09658992290496826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:14,308 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,308 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 
{event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:14,308 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for e560e5bb386017b6375814c96ac2089a: Running coprocessor pre-open hook at 1733113454290Writing region info on filesystem at 1733113454290Initializing all the Stores at 1733113454295 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113454295Cleaning up temporary data from old regions at 1733113454303 (+8 ms)Running coprocessor post-open hooks at 1733113454308 (+5 ms)Region opened successfully at 1733113454308 2024-12-02T04:24:14,308 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 29c4bfb474cb7d4f861ae74e0e76f8fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64069831, jitterRate=-0.045285120606422424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:14,308 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,309 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 29c4bfb474cb7d4f861ae74e0e76f8fb: Running coprocessor pre-open hook at 1733113454290Writing region info on filesystem at 1733113454290Initializing all the Stores at 1733113454295 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113454295Cleaning up temporary data from old regions at 1733113454303 (+8 ms)Running coprocessor post-open hooks at 1733113454308 (+5 ms)Region opened successfully at 1733113454308 2024-12-02T04:24:14,309 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb., pid=49, masterSystemTime=1733113454286 2024-12-02T04:24:14,309 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a., pid=48, masterSystemTime=1733113454284 2024-12-02T04:24:14,312 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post 
open deploy task for testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:14,312 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:14,312 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=29c4bfb474cb7d4f861ae74e0e76f8fb, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:14,313 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:14,313 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:14,314 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=e560e5bb386017b6375814c96ac2089a, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:14,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:24:14,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure e560e5bb386017b6375814c96ac2089a, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:24:14,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-12-02T04:24:14,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure e560e5bb386017b6375814c96ac2089a, server=e2eaa0f11f7e,37413,1733113342217 in 187 msec 2024-12-02T04:24:14,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-12-02T04:24:14,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb, server=e2eaa0f11f7e,41643,1733113342088 in 186 msec 2024-12-02T04:24:14,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, ASSIGN in 348 msec 2024-12-02T04:24:14,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-02T04:24:14,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, ASSIGN in 349 msec 2024-12-02T04:24:14,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure 
table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:24:14,328 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113454328"}]},"ts":"1733113454328"} 2024-12-02T04:24:14,330 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-02T04:24:14,331 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:24:14,331 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-02T04:24:14,336 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T04:24:14,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:14,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:14,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:14,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:14,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:14,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:14,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:14,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:14,401 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 464 msec 2024-12-02T04:24:14,567 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T04:24:14,568 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T04:24:14,568 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,571 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-02T04:24:14,571 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:14,571 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:14,573 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,580 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,582 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44890, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:14,586 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,590 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T04:24:14,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113454591 (current time:1733113454591). 
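
The snapshot request recorded just above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) originates from the test client. A minimal sketch of issuing the same FLUSH-type snapshot through the public Admin API follows; the class name is invented for the example and the test's own call path may differ.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Equivalent of the request logged by MasterRpcServices:
          // { ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
        }
      }
    }

On the master side such a call is validated by SnapshotDescriptionUtils and then stored as a SnapshotProcedure, which is what the subsequent pid=50 entries in this log show.
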
2024-12-02T04:24:14,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:24:14,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-02T04:24:14,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:24:14,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d8d5a77, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:14,593 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:14,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:14,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:14,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@379675fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:14,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:14,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,594 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:14,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de05736, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:14,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:14,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,597 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59278, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:14,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:24:14,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:14,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,599 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:24:14,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18b899f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:14,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:14,601 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:14,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:14,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:14,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fd93180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:14,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:14,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,602 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:14,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44afd8ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:14,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:14,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59282, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
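
The ACL activity in this stretch of the log (PermissionStorage writing 'jenkins: RWXCA' for the new table, the ZooKeeper NodeChildrenChanged events on /hbase/acl, and ZKPermissionWatcher refreshing the permission cache on each region server) is performed automatically by the AccessController coprocessor when the table is created. Purely as an assumed, hand-written equivalent, a comparable grant could be issued and read back with AccessControlClient as sketched below; the class name and the explicit grant call are illustrative only, since in this run the owner permission is written by the system rather than granted by hand.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class AclExample {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          TableName table = TableName.valueOf("testtb-testExportWithTargetName");
          // Grant the same RWXCA action set the log shows for user 'jenkins'.
          AccessControlClient.grant(conn, table, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
          // Read the ACL entries back, much as PermissionStorage does when the
          // snapshot validation path copies table permissions into the snapshot.
          for (UserPermission up : AccessControlClient.getUserPermissions(conn, table.getNameAsString())) {
            System.out.println(up);
          }
        }
      }
    }
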
2024-12-02T04:24:14,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:24:14,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,609 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:14,610 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:24:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,610 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:14,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T04:24:14,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:24:14,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T04:24:14,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-02T04:24:14,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T04:24:14,615 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:24:14,617 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:24:14,620 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:24:14,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741914_1090 (size=167) 2024-12-02T04:24:14,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741914_1090 (size=167) 2024-12-02T04:24:14,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741914_1090 (size=167) 2024-12-02T04:24:14,628 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:24:14,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb}] 2024-12-02T04:24:14,629 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,630 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T04:24:14,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-02T04:24:14,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-02T04:24:14,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:14,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:14,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for e560e5bb386017b6375814c96ac2089a: 2024-12-02T04:24:14,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 29c4bfb474cb7d4f861ae74e0e76f8fb: 2024-12-02T04:24:14,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. for emptySnaptb0-testExportWithTargetName completed. 2024-12-02T04:24:14,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. for emptySnaptb0-testExportWithTargetName completed. 2024-12-02T04:24:14,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-02T04:24:14,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-02T04:24:14,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:14,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:14,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:24:14,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:24:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741915_1091 (size=70) 2024-12-02T04:24:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741915_1091 (size=70) 2024-12-02T04:24:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741915_1091 (size=70) 2024-12-02T04:24:14,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:14,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-02T04:24:14,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-02T04:24:14,796 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,797 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:14,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741916_1092 (size=70) 2024-12-02T04:24:14,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741916_1092 (size=70) 2024-12-02T04:24:14,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741916_1092 (size=70) 2024-12-02T04:24:14,799 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 
2024-12-02T04:24:14,799 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-02T04:24:14,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb in 169 msec 2024-12-02T04:24:14,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-02T04:24:14,800 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,800 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-02T04:24:14,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a in 172 msec 2024-12-02T04:24:14,802 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:24:14,803 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:24:14,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
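The SNAPSHOT_SNAPSHOT_MOB_REGION state above, together with the mobdir/ flush paths further down, shows that family 'cf' is MOB-enabled, and the two region names (start keys '' and '1') show the table was pre-split at '1'. A hedged sketch of creating a table shaped like that with the public descriptor builders; the MOB threshold value is illustrative only and not taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("testtb-testExportWithTargetName");
          // MOB-enabled family 'cf'; the threshold (in bytes) is an arbitrary example value.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(100L)
              .build();
          // Pre-split at '1', matching the two regions (start keys '' and '1') in the log.
          byte[][] splits = { Bytes.toBytes("1") };
          admin.createTable(TableDescriptorBuilder.newBuilder(name).setColumnFamily(cf).build(), splits);
        }
      }
    }

With MOB enabled, cells above the threshold are flushed into the separate mob directory, which is why the snapshot also has to cover the dedicated mob region seen later in this log in addition to the two data regions.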
2024-12-02T04:24:14,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:24:14,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:14,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:24:14,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741917_1093 (size=62) 2024-12-02T04:24:14,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741917_1093 (size=62) 2024-12-02T04:24:14,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741917_1093 (size=62) 2024-12-02T04:24:14,814 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:24:14,814 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-02T04:24:14,814 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-02T04:24:14,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741918_1094 (size=649) 2024-12-02T04:24:14,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741918_1094 (size=649) 2024-12-02T04:24:14,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741918_1094 (size=649) 2024-12-02T04:24:14,829 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:24:14,835 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:24:14,835 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-02T04:24:14,837 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:24:14,837 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-02T04:24:14,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 225 msec 2024-12-02T04:24:14,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T04:24:14,929 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T04:24:14,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:24:14,937 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:24:14,939 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,942 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-02T04:24:14,943 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 
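The two "writing data to region ... with WAL disabled" entries above come from the test loading rows with write-ahead logging skipped. A minimal sketch of a put that produces that message, assuming the public client API; the row key and value are placeholders, and only the table, family, and qualifier names come from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0"))   // placeholder row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL for this mutation is what triggers the
          // "writing data to region ... with WAL disabled" message on the region server.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }

Skipping the WAL is fine for disposable test data but trades away crash recovery, which is exactly what the "Data may be lost in the event of a crash" wording flags.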
2024-12-02T04:24:14,943 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:14,945 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,950 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,959 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T04:24:14,962 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T04:24:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113454962 (current time:1733113454962). 2024-12-02T04:24:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:24:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-02T04:24:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:24:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a76d37c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:14,963 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:14,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:14,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:14,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6423be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-02T04:24:14,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:14,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:14,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,965 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:14,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51982ae6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:14,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:14,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:14,969 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 
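The "snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }" entry above is the server side of a synchronous admin snapshot call. A minimal client-side sketch, assuming the standard Admin API; the snapshot and table names are taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a FLUSH-type snapshot and blocks
          // until the master-side SnapshotProcedure reports completion.
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }

The repeated "Checking to see if procedure is done pid=53" entries that follow are this call polling the master until the snapshot procedure finishes.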
2024-12-02T04:24:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,969 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bf2bfb6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:14,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:14,970 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:14,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:14,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:14,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a5fafc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:14,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:14,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,971 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60630, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:14,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79556f35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:14,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:14,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:14,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,974 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:14,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:24:14,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:14,977 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:14,978 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 
2024-12-02T04:24:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:14,979 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T04:24:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
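The "Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]" entry above shows the master copying the table's ACL (user jenkins with READ/WRITE/EXEC/CREATE/ADMIN) into the snapshot description. A hedged sketch of how such an entry is granted in the first place, using the AccessControlClient helper; treat the exact overload as an assumption to check against the client version in use:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      // AccessControlClient.grant declares "throws Throwable", hence the signature here.
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Table-wide grant (null family/qualifier) for user jenkins with RWXCA,
          // the same shape as the ACL entry read back in the log above.
          AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportWithTargetName"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }

Because the table ACL is embedded in the snapshot description here, restoring or cloning the snapshot elsewhere can carry the permissions along with the data.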
2024-12-02T04:24:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T04:24:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-02T04:24:14,981 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:24:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T04:24:14,982 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:24:14,984 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:24:14,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741919_1095 (size=162) 2024-12-02T04:24:14,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741919_1095 (size=162) 2024-12-02T04:24:14,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741919_1095 (size=162) 2024-12-02T04:24:14,992 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:24:14,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb}] 2024-12-02T04:24:14,993 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:14,993 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:15,088 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T04:24:15,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-02T04:24:15,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-02T04:24:15,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:15,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:15,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing e560e5bb386017b6375814c96ac2089a 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-02T04:24:15,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 29c4bfb474cb7d4f861ae74e0e76f8fb 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-02T04:24:15,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a is 71, key is 05a5d2191899870859a7ade514f4c97e/cf:q/1733113454935/Put/seqid=0 2024-12-02T04:24:15,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb is 71, key is 12a04cd3b1d1db8098c830e4bb6e75fc/cf:q/1733113454937/Put/seqid=0 2024-12-02T04:24:15,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741920_1096 (size=5311) 2024-12-02T04:24:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741920_1096 (size=5311) 2024-12-02T04:24:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741920_1096 (size=5311) 2024-12-02T04:24:15,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:15,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to 
blk_1073741921_1097 (size=7962) 2024-12-02T04:24:15,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741921_1097 (size=7962) 2024-12-02T04:24:15,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741921_1097 (size=7962) 2024-12-02T04:24:15,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:15,178 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:15,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/.tmp/cf/2fcdae2371214633951aa6e32789d66e, store: [table=testtb-testExportWithTargetName family=cf region=e560e5bb386017b6375814c96ac2089a] 2024-12-02T04:24:15,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/.tmp/cf/2fcdae2371214633951aa6e32789d66e is 208, key is 06248039fc5ebce148b565bd12dbca8bd/cf:q/1733113454935/Put/seqid=0 2024-12-02T04:24:15,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:15,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/.tmp/cf/7dc7e536a1b24064b5281549cf19d1ab, store: [table=testtb-testExportWithTargetName family=cf region=29c4bfb474cb7d4f861ae74e0e76f8fb] 2024-12-02T04:24:15,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/.tmp/cf/7dc7e536a1b24064b5281549cf19d1ab is 208, key is 1055baf710ecde0ec3e00465d4017a51a/cf:q/1733113454937/Put/seqid=0 2024-12-02T04:24:15,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741922_1098 (size=6524) 2024-12-02T04:24:15,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741922_1098 (size=6524) 2024-12-02T04:24:15,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741922_1098 (size=6524) 2024-12-02T04:24:15,187 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/.tmp/cf/2fcdae2371214633951aa6e32789d66e 2024-12-02T04:24:15,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741923_1099 (size=14339) 2024-12-02T04:24:15,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741923_1099 (size=14339) 2024-12-02T04:24:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741923_1099 (size=14339) 2024-12-02T04:24:15,191 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/.tmp/cf/7dc7e536a1b24064b5281549cf19d1ab 2024-12-02T04:24:15,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/.tmp/cf/2fcdae2371214633951aa6e32789d66e as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf/2fcdae2371214633951aa6e32789d66e 2024-12-02T04:24:15,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/.tmp/cf/7dc7e536a1b24064b5281549cf19d1ab as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf/7dc7e536a1b24064b5281549cf19d1ab 2024-12-02T04:24:15,200 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf/2fcdae2371214633951aa6e32789d66e, entries=6, sequenceid=6, filesize=6.4 K 2024-12-02T04:24:15,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for e560e5bb386017b6375814c96ac2089a in 56ms, sequenceid=6, compaction requested=false 2024-12-02T04:24:15,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-02T04:24:15,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for e560e5bb386017b6375814c96ac2089a: 2024-12-02T04:24:15,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. for snaptb0-testExportWithTargetName completed. 2024-12-02T04:24:15,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-02T04:24:15,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:15,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf/2fcdae2371214633951aa6e32789d66e] hfiles 2024-12-02T04:24:15,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf/2fcdae2371214633951aa6e32789d66e for snapshot=snaptb0-testExportWithTargetName 2024-12-02T04:24:15,202 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf/7dc7e536a1b24064b5281549cf19d1ab, entries=44, sequenceid=6, filesize=14.0 K 2024-12-02T04:24:15,203 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 29c4bfb474cb7d4f861ae74e0e76f8fb in 58ms, sequenceid=6, compaction requested=false 2024-12-02T04:24:15,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 29c4bfb474cb7d4f861ae74e0e76f8fb: 2024-12-02T04:24:15,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. for snaptb0-testExportWithTargetName completed. 2024-12-02T04:24:15,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-02T04:24:15,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:15,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf/7dc7e536a1b24064b5281549cf19d1ab] hfiles 2024-12-02T04:24:15,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf/7dc7e536a1b24064b5281549cf19d1ab for snapshot=snaptb0-testExportWithTargetName 2024-12-02T04:24:15,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741924_1100 (size=109) 2024-12-02T04:24:15,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741924_1100 (size=109) 2024-12-02T04:24:15,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741924_1100 (size=109) 2024-12-02T04:24:15,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 
2024-12-02T04:24:15,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-02T04:24:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-02T04:24:15,216 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:15,216 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:15,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e560e5bb386017b6375814c96ac2089a in 225 msec 2024-12-02T04:24:15,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741925_1101 (size=109) 2024-12-02T04:24:15,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741925_1101 (size=109) 2024-12-02T04:24:15,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741925_1101 (size=109) 2024-12-02T04:24:15,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 
2024-12-02T04:24:15,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-02T04:24:15,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-02T04:24:15,221 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:15,221 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:15,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-02T04:24:15,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb in 230 msec 2024-12-02T04:24:15,224 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:24:15,225 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:24:15,226 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:24:15,226 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:24:15,226 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:15,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a] hfiles 2024-12-02T04:24:15,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:15,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:15,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741926_1102 (size=293) 2024-12-02T04:24:15,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741926_1102 (size=293) 2024-12-02T04:24:15,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741926_1102 (size=293) 2024-12-02T04:24:15,237 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:24:15,237 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-02T04:24:15,237 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-02T04:24:15,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741927_1103 (size=959) 2024-12-02T04:24:15,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741927_1103 (size=959) 2024-12-02T04:24:15,247 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741927_1103 (size=959) 2024-12-02T04:24:15,252 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:24:15,259 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:24:15,260 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-02T04:24:15,261 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:24:15,261 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-02T04:24:15,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 281 msec 2024-12-02T04:24:15,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T04:24:15,298 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T04:24:15,298 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298 2024-12-02T04:24:15,299 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:15,329 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:15,329 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-02T04:24:15,331 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:24:15,337 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-02T04:24:15,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741929_1105 (size=959) 2024-12-02T04:24:15,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741929_1105 (size=959) 2024-12-02T04:24:15,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741928_1104 (size=162) 2024-12-02T04:24:15,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741928_1104 (size=162) 2024-12-02T04:24:15,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741928_1104 (size=162) 2024-12-02T04:24:15,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741929_1105 (size=959) 2024-12-02T04:24:15,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741930_1106 (size=154) 2024-12-02T04:24:15,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741930_1106 (size=154) 2024-12-02T04:24:15,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741930_1106 (size=154) 2024-12-02T04:24:15,366 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:15,366 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:15,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-14361348795280154274.jar 2024-12-02T04:24:16,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-14729317488671032713.jar 2024-12-02T04:24:16,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,324 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,324 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:16,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:24:16,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:24:16,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:24:16,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:24:16,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:24:16,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:24:16,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:24:16,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:24:16,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:24:16,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:24:16,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:24:16,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:16,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:16,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-12-02T04:24:16,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:16,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:16,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:24:16,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:24:16,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741931_1107 (size=131440) 2024-12-02T04:24:16,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741931_1107 (size=131440) 2024-12-02T04:24:16,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741931_1107 (size=131440) 2024-12-02T04:24:16,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741932_1108 (size=4188619) 2024-12-02T04:24:16,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741932_1108 (size=4188619) 2024-12-02T04:24:16,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741932_1108 (size=4188619) 2024-12-02T04:24:16,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741933_1109 (size=1323991) 2024-12-02T04:24:16,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741933_1109 (size=1323991) 2024-12-02T04:24:16,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741933_1109 (size=1323991) 2024-12-02T04:24:16,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741934_1110 (size=903927) 2024-12-02T04:24:16,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741934_1110 (size=903927) 2024-12-02T04:24:16,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741934_1110 (size=903927) 2024-12-02T04:24:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741935_1111 (size=8360083) 
2024-12-02T04:24:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741935_1111 (size=8360083) 2024-12-02T04:24:16,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741935_1111 (size=8360083) 2024-12-02T04:24:16,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741936_1112 (size=6424732) 2024-12-02T04:24:16,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741936_1112 (size=6424732) 2024-12-02T04:24:16,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741936_1112 (size=6424732) 2024-12-02T04:24:16,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741937_1113 (size=1877034) 2024-12-02T04:24:16,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741937_1113 (size=1877034) 2024-12-02T04:24:16,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741937_1113 (size=1877034) 2024-12-02T04:24:16,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741938_1114 (size=77835) 2024-12-02T04:24:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741938_1114 (size=77835) 2024-12-02T04:24:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741938_1114 (size=77835) 2024-12-02T04:24:16,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741939_1115 (size=30949) 2024-12-02T04:24:16,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741939_1115 (size=30949) 2024-12-02T04:24:16,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741939_1115 (size=30949) 2024-12-02T04:24:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741940_1116 (size=1597213) 2024-12-02T04:24:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741940_1116 (size=1597213) 2024-12-02T04:24:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741940_1116 (size=1597213) 2024-12-02T04:24:16,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741941_1117 (size=4695811) 2024-12-02T04:24:16,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741941_1117 (size=4695811) 2024-12-02T04:24:16,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741941_1117 
(size=4695811) 2024-12-02T04:24:16,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741942_1118 (size=232957) 2024-12-02T04:24:16,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741942_1118 (size=232957) 2024-12-02T04:24:16,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741942_1118 (size=232957) 2024-12-02T04:24:16,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741943_1119 (size=127628) 2024-12-02T04:24:16,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741943_1119 (size=127628) 2024-12-02T04:24:16,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741943_1119 (size=127628) 2024-12-02T04:24:16,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741944_1120 (size=20406) 2024-12-02T04:24:16,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741944_1120 (size=20406) 2024-12-02T04:24:16,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741944_1120 (size=20406) 2024-12-02T04:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741945_1121 (size=5175431) 2024-12-02T04:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741945_1121 (size=5175431) 2024-12-02T04:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741945_1121 (size=5175431) 2024-12-02T04:24:16,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741946_1122 (size=217634) 2024-12-02T04:24:16,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741946_1122 (size=217634) 2024-12-02T04:24:16,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741946_1122 (size=217634) 2024-12-02T04:24:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741947_1123 (size=1832290) 2024-12-02T04:24:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741947_1123 (size=1832290) 2024-12-02T04:24:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741947_1123 (size=1832290) 2024-12-02T04:24:16,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741948_1124 (size=322274) 2024-12-02T04:24:16,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to 
blk_1073741948_1124 (size=322274) 2024-12-02T04:24:16,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741948_1124 (size=322274) 2024-12-02T04:24:16,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741949_1125 (size=503880) 2024-12-02T04:24:16,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741949_1125 (size=503880) 2024-12-02T04:24:16,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741949_1125 (size=503880) 2024-12-02T04:24:16,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741950_1126 (size=29229) 2024-12-02T04:24:16,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741950_1126 (size=29229) 2024-12-02T04:24:16,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741950_1126 (size=29229) 2024-12-02T04:24:16,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741951_1127 (size=24096) 2024-12-02T04:24:16,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741951_1127 (size=24096) 2024-12-02T04:24:16,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741951_1127 (size=24096) 2024-12-02T04:24:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741952_1128 (size=111872) 2024-12-02T04:24:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741952_1128 (size=111872) 2024-12-02T04:24:16,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741952_1128 (size=111872) 2024-12-02T04:24:16,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741953_1129 (size=440957) 2024-12-02T04:24:16,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741953_1129 (size=440957) 2024-12-02T04:24:16,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741953_1129 (size=440957) 2024-12-02T04:24:16,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741954_1130 (size=45609) 2024-12-02T04:24:16,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741954_1130 (size=45609) 2024-12-02T04:24:16,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741954_1130 (size=45609) 2024-12-02T04:24:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to 
blk_1073741955_1131 (size=136454) 2024-12-02T04:24:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741955_1131 (size=136454) 2024-12-02T04:24:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741955_1131 (size=136454) 2024-12-02T04:24:16,739 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T04:24:16,741 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-02T04:24:16,744 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.0 K 2024-12-02T04:24:16,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-02T04:24:16,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.4 K 2024-12-02T04:24:16,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.2 K 2024-12-02T04:24:16,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741956_1132 (size=1031) 2024-12-02T04:24:16,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741956_1132 (size=1031) 2024-12-02T04:24:16,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741956_1132 (size=1031) 2024-12-02T04:24:16,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741957_1133 (size=35) 2024-12-02T04:24:16,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741957_1133 (size=35) 2024-12-02T04:24:16,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741957_1133 (size=35) 2024-12-02T04:24:16,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741958_1134 (size=304003) 2024-12-02T04:24:16,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741958_1134 (size=304003) 2024-12-02T04:24:16,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741958_1134 (size=304003) 2024-12-02T04:24:17,339 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:24:17,339 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:24:17,342 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0001_000001 (auth:SIMPLE) from 127.0.0.1:58242 2024-12-02T04:24:17,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0001/container_1733113348655_0001_01_000001/launch_container.sh] 2024-12-02T04:24:17,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0001/container_1733113348655_0001_01_000001/container_tokens] 2024-12-02T04:24:17,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0001/container_1733113348655_0001_01_000001/sysfs] 2024-12-02T04:24:18,075 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:42724 2024-12-02T04:24:18,740 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:24:20,562 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T04:24:21,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-02T04:24:21,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-02T04:24:21,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:21,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T04:24:23,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:45370 2024-12-02T04:24:23,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741959_1135 (size=349701) 2024-12-02T04:24:23,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741959_1135 (size=349701) 2024-12-02T04:24:23,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741959_1135 (size=349701) 2024-12-02T04:24:25,185 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e560e5bb386017b6375814c96ac2089a changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:24:25,185 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 29c4bfb474cb7d4f861ae74e0e76f8fb changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:24:25,583 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:41320 2024-12-02T04:24:25,584 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:46202 2024-12-02T04:24:26,432 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:46216 2024-12-02T04:24:26,440 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:41326 2024-12-02T04:24:27,180 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:24:29,343 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:24:31,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741960_1136 (size=14339) 2024-12-02T04:24:31,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741960_1136 (size=14339) 
2024-12-02T04:24:31,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741960_1136 (size=14339) 2024-12-02T04:24:31,408 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000002/launch_container.sh] 2024-12-02T04:24:31,408 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000002/container_tokens] 2024-12-02T04:24:31,409 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000002/sysfs] 2024-12-02T04:24:32,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741962_1138 (size=6524) 2024-12-02T04:24:32,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741962_1138 (size=6524) 2024-12-02T04:24:32,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741962_1138 (size=6524) 2024-12-02T04:24:33,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741963_1139 (size=5311) 2024-12-02T04:24:33,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741963_1139 (size=5311) 2024-12-02T04:24:33,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741963_1139 (size=5311) 2024-12-02T04:24:33,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741964_1140 (size=7962) 2024-12-02T04:24:33,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741964_1140 (size=7962) 2024-12-02T04:24:33,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741964_1140 (size=7962) 2024-12-02T04:24:33,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741961_1137 (size=31741) 2024-12-02T04:24:33,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741961_1137 (size=31741) 2024-12-02T04:24:33,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741961_1137 
(size=31741) 2024-12-02T04:24:33,559 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000005/launch_container.sh] 2024-12-02T04:24:33,559 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000005/container_tokens] 2024-12-02T04:24:33,559 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000005/sysfs] 2024-12-02T04:24:33,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741965_1141 (size=465) 2024-12-02T04:24:33,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741965_1141 (size=465) 2024-12-02T04:24:33,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741965_1141 (size=465) 2024-12-02T04:24:33,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000003/launch_container.sh] 2024-12-02T04:24:33,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000003/container_tokens] 2024-12-02T04:24:33,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000003/sysfs] 2024-12-02T04:24:34,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741966_1142 (size=31741) 2024-12-02T04:24:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741966_1142 (size=31741) 2024-12-02T04:24:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is 
added to blk_1073741966_1142 (size=31741) 2024-12-02T04:24:34,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741967_1143 (size=349701) 2024-12-02T04:24:34,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741967_1143 (size=349701) 2024-12-02T04:24:34,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741967_1143 (size=349701) 2024-12-02T04:24:34,256 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:34008 2024-12-02T04:24:34,292 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:34014 2024-12-02T04:24:36,006 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:24:36,008 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T04:24:36,021 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-02T04:24:36,021 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:24:36,023 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:24:36,023 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-02T04:24:36,023 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-02T04:24:36,023 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-02T04:24:36,023 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298/.hbase-snapshot/testExportWithTargetName 2024-12-02T04:24:36,024 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-02T04:24:36,024 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113455298/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-02T04:24:36,031 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-02T04:24:36,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T04:24:36,035 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113476035"}]},"ts":"1733113476035"} 2024-12-02T04:24:36,037 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-02T04:24:36,037 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-02T04:24:36,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-02T04:24:36,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, UNASSIGN}] 2024-12-02T04:24:36,041 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, UNASSIGN 2024-12-02T04:24:36,041 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, UNASSIGN 2024-12-02T04:24:36,042 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=e560e5bb386017b6375814c96ac2089a, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:36,042 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=29c4bfb474cb7d4f861ae74e0e76f8fb, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:36,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, UNASSIGN because future has completed 2024-12-02T04:24:36,044 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:36,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, 
state=RUNNABLE, hasLock=false; CloseRegionProcedure e560e5bb386017b6375814c96ac2089a, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:24:36,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, UNASSIGN because future has completed 2024-12-02T04:24:36,045 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:36,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:24:36,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T04:24:36,198 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing e560e5bb386017b6375814c96ac2089a, disabling compactions & flushes 2024-12-02T04:24:36,198 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. after waiting 0 ms 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 
2024-12-02T04:24:36,198 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:36,198 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing 29c4bfb474cb7d4f861ae74e0e76f8fb, disabling compactions & flushes 2024-12-02T04:24:36,199 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:36,199 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:36,199 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. after waiting 0 ms 2024-12-02T04:24:36,199 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 2024-12-02T04:24:36,209 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:24:36,210 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:36,210 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb. 
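The records above trace the full disable chain on the master: the client's disable RPC stores DisableTableProcedure (pid=56), which spawns CloseTableRegionsProcedure (pid=57), one TransitRegionStateProcedure per region (pid=58, pid=59), and a CloseRegionProcedure on each hosting region server (pid=60, pid=61), while the client keeps polling "Checking to see if procedure is done pid=56". A minimal client-side sketch of the call that produces this chain is given below; the connection setup and class name are illustrative assumptions, not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithTargetName");
          // Blocks until the master's DisableTableProcedure and its region-close
          // subprocedures complete (the "Operation: DISABLE ... completed" record).
          admin.disableTable(table);
          assert admin.isTableDisabled(table);
        }
      }
    }
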
2024-12-02T04:24:36,210 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for 29c4bfb474cb7d4f861ae74e0e76f8fb: Waiting for close lock at 1733113476198Running coprocessor pre-close hooks at 1733113476198Disabling compacts and flushes for region at 1733113476198Disabling writes for close at 1733113476199 (+1 ms)Writing region close event to WAL at 1733113476200 (+1 ms)Running coprocessor post-close hooks at 1733113476210 (+10 ms)Closed at 1733113476210 2024-12-02T04:24:36,211 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:24:36,212 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:36,212 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a. 2024-12-02T04:24:36,212 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for e560e5bb386017b6375814c96ac2089a: Waiting for close lock at 1733113476198Running coprocessor pre-close hooks at 1733113476198Disabling compacts and flushes for region at 1733113476198Disabling writes for close at 1733113476198Writing region close event to WAL at 1733113476199 (+1 ms)Running coprocessor post-close hooks at 1733113476212 (+13 ms)Closed at 1733113476212 2024-12-02T04:24:36,212 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed 29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:36,213 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=29c4bfb474cb7d4f861ae74e0e76f8fb, regionState=CLOSED 2024-12-02T04:24:36,214 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:36,214 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=e560e5bb386017b6375814c96ac2089a, regionState=CLOSED 2024-12-02T04:24:36,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29c4bfb474cb7d4f861ae74e0e76f8fb, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:24:36,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure e560e5bb386017b6375814c96ac2089a, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:24:36,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-02T04:24:36,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 
29c4bfb474cb7d4f861ae74e0e76f8fb, server=e2eaa0f11f7e,41643,1733113342088 in 171 msec 2024-12-02T04:24:36,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-02T04:24:36,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure e560e5bb386017b6375814c96ac2089a, server=e2eaa0f11f7e,37413,1733113342217 in 174 msec 2024-12-02T04:24:36,221 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29c4bfb474cb7d4f861ae74e0e76f8fb, UNASSIGN in 178 msec 2024-12-02T04:24:36,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=58, resume processing ppid=57 2024-12-02T04:24:36,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e560e5bb386017b6375814c96ac2089a, UNASSIGN in 179 msec 2024-12-02T04:24:36,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-02T04:24:36,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 185 msec 2024-12-02T04:24:36,227 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113476226"}]},"ts":"1733113476226"} 2024-12-02T04:24:36,229 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-02T04:24:36,229 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-02T04:24:36,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 199 msec 2024-12-02T04:24:36,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T04:24:36,358 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T04:24:36,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-02T04:24:36,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,361 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-02T04:24:36,362 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): 
Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,366 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-02T04:24:36,369 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:36,369 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:36,371 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/recovered.edits] 2024-12-02T04:24:36,372 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/recovered.edits] 2024-12-02T04:24:36,377 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf/7dc7e536a1b24064b5281549cf19d1ab to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/cf/7dc7e536a1b24064b5281549cf19d1ab 2024-12-02T04:24:36,378 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf/2fcdae2371214633951aa6e32789d66e to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/cf/2fcdae2371214633951aa6e32789d66e 2024-12-02T04:24:36,385 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb/recovered.edits/9.seqid 2024-12-02T04:24:36,388 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/29c4bfb474cb7d4f861ae74e0e76f8fb 
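With the table disabled, the client's delete RPC stores DeleteTableProcedure (pid=62), and HFileArchiver moves each region's store files and recovered.edits under the archive directory before the region directories are removed. A hedged sketch of the corresponding cleanup calls follows; an already-open Admin handle is assumed, and the snapshot names are the ones deleted further down in the log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class DropTableAndSnapshots {
      // Assumes the table has already been disabled, as in the records above.
      static void cleanup(Admin admin) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        // Triggers DeleteTableProcedure: region directories are archived, then the
        // table's rows and descriptor are removed from hbase:meta.
        admin.deleteTable(table);
        // Snapshot cleanup matching the "delete name: ..." records later in the log.
        admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
        admin.deleteSnapshot("snaptb0-testExportWithTargetName");
      }
    }
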
2024-12-02T04:24:36,389 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a/recovered.edits/9.seqid 2024-12-02T04:24:36,390 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithTargetName/e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:36,390 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-02T04:24:36,391 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-02T04:24:36,392 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-02T04:24:36,397 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202a640435290c64aad9b9ee31564df4786_29c4bfb474cb7d4f861ae74e0e76f8fb 2024-12-02T04:24:36,399 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412029eb5cbb0376246e8a6180af8e2bdd67a_e560e5bb386017b6375814c96ac2089a 2024-12-02T04:24:36,400 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-02T04:24:36,402 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,405 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-02T04:24:36,408 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 
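The archiver records above show the path mapping used during the delete: a store file under data/default/<table>/<region>/cf/ (or a MOB file under mobdir/data/default/<table>/<mob-region>/cf/) ends up under archive/data/default/<table>/<region>/cf/ with the same file name. The helper below only reconstructs that destination layout for illustration; it is not HBase's HFileArchiver implementation.

    import org.apache.hadoop.fs.Path;

    public final class ArchivePathExample {
      // rootDir is the test-data root seen in the log; table/region/family/file are the
      // path components of the store file being archived.
      static Path archivedStoreFile(Path rootDir, String table, String region, String family, String file) {
        Path archiveBase = new Path(rootDir, "archive/data/default");
        return new Path(new Path(new Path(new Path(archiveBase, table), region), family), file);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd");
        // Prints .../archive/data/default/testtb-testExportWithTargetName/29c4.../cf/7dc7...,
        // matching the destination in the HFileArchiver(596) record above.
        System.out.println(archivedStoreFile(root, "testtb-testExportWithTargetName",
            "29c4bfb474cb7d4f861ae74e0e76f8fb", "cf", "7dc7e536a1b24064b5281549cf19d1ab"));
      }
    }
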
2024-12-02T04:24:36,410 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,410 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-02T04:24:36,410 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113476410"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:36,410 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113476410"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:36,413 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:24:36,413 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e560e5bb386017b6375814c96ac2089a, NAME => 'testtb-testExportWithTargetName,,1733113453935.e560e5bb386017b6375814c96ac2089a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 29c4bfb474cb7d4f861ae74e0e76f8fb, NAME => 'testtb-testExportWithTargetName,1,1733113453935.29c4bfb474cb7d4f861ae74e0e76f8fb.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:24:36,413 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-02T04:24:36,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113476413"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:36,416 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-02T04:24:36,416 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T04:24:36,418 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 58 msec 2024-12-02T04:24:36,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,436 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T04:24:36,436 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T04:24:36,436 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T04:24:36,436 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-02T04:24:36,445 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-02T04:24:36,445 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T04:24:36,452 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-02T04:24:36,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-02T04:24:36,457 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-02T04:24:36,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-02T04:24:36,486 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=797 (was 764) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2089 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1533679707_1 at /127.0.0.1:34422 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 125085) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39969 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:53358 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39969 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1533679707_1 at /127.0.0.1:33990 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:34010 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40779 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:34426 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39969 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for 
volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:40779 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45453 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39969 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 811) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=629 (was 418) - SystemLoadAverage LEAK? -, ProcessCount=22 (was 22), AvailableMemoryMB=2030 (was 367) - AvailableMemoryMB LEAK? 
- 2024-12-02T04:24:36,486 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-02T04:24:36,509 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=797, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=629, ProcessCount=22, AvailableMemoryMB=2028 2024-12-02T04:24:36,509 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-02T04:24:36,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:24:36,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:36,512 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:24:36,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-02T04:24:36,514 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:24:36,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T04:24:36,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741968_1144 (size=440) 2024-12-02T04:24:36,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741968_1144 (size=440) 2024-12-02T04:24:36,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741968_1144 (size=440) 2024-12-02T04:24:36,526 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a8a75a2c72a1835b796320d54112331e, NAME => 'testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:36,526 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6200c5eb2c04b1d4f3b9574ebc3b9096, NAME => 'testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:36,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741970_1146 (size=65) 2024-12-02T04:24:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741970_1146 (size=65) 2024-12-02T04:24:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741970_1146 (size=65) 2024-12-02T04:24:36,538 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:36,538 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 6200c5eb2c04b1d4f3b9574ebc3b9096, disabling compactions & flushes 2024-12-02T04:24:36,538 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:36,538 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:36,538 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. after waiting 0 ms 2024-12-02T04:24:36,538 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:36,538 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 
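The create request above spells out the full descriptor for 'testtb-testExportWithResetTtl': a single column family 'cf' with MOB enabled (IS_MOB => 'true') and MOB_THRESHOLD => '0', one version, no compression, the DEFAULT store file tracker, and a pre-split at key '1' into the two regions being initialized here. A minimal client-side sketch that would produce an equivalent table, assuming an already-open Connection named conn (names and structure are illustrative, not the test's actual helper code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTable {
  // Sketch only: builds a table matching the descriptor logged above.
  public static void create(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value is written as a MOB
        .setMaxVersions(1)     // VERSIONS => '1'
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build();
    try (Admin admin = conn.getAdmin()) {
      // Split key '1' gives the two regions created above: ('', '1') and ('1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}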
2024-12-02T04:24:36,539 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6200c5eb2c04b1d4f3b9574ebc3b9096: Waiting for close lock at 1733113476538Disabling compacts and flushes for region at 1733113476538Disabling writes for close at 1733113476538Writing region close event to WAL at 1733113476538Closed at 1733113476538 2024-12-02T04:24:36,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741969_1145 (size=65) 2024-12-02T04:24:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741969_1145 (size=65) 2024-12-02T04:24:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741969_1145 (size=65) 2024-12-02T04:24:36,550 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:36,550 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing a8a75a2c72a1835b796320d54112331e, disabling compactions & flushes 2024-12-02T04:24:36,550 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:36,550 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:36,550 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. after waiting 0 ms 2024-12-02T04:24:36,550 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:36,550 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 
2024-12-02T04:24:36,550 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for a8a75a2c72a1835b796320d54112331e: Waiting for close lock at 1733113476550Disabling compacts and flushes for region at 1733113476550Disabling writes for close at 1733113476550Writing region close event to WAL at 1733113476550Closed at 1733113476550 2024-12-02T04:24:36,552 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:24:36,553 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733113476552"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113476552"}]},"ts":"1733113476552"} 2024-12-02T04:24:36,553 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733113476552"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113476552"}]},"ts":"1733113476552"} 2024-12-02T04:24:36,556 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:24:36,557 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:24:36,557 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113476557"}]},"ts":"1733113476557"} 2024-12-02T04:24:36,559 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-02T04:24:36,559 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:24:36,561 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:24:36,561 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:24:36,561 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:24:36,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:24:36,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, ASSIGN}] 2024-12-02T04:24:36,563 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, ASSIGN 2024-12-02T04:24:36,563 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, ASSIGN 2024-12-02T04:24:36,564 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:24:36,564 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:24:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T04:24:36,714 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T04:24:36,714 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=6200c5eb2c04b1d4f3b9574ebc3b9096, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:36,714 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=a8a75a2c72a1835b796320d54112331e, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:36,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, ASSIGN because future has completed 2024-12-02T04:24:36,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:24:36,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, ASSIGN because future has completed 2024-12-02T04:24:36,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure a8a75a2c72a1835b796320d54112331e, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:24:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T04:24:36,876 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:36,876 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => a8a75a2c72a1835b796320d54112331e, NAME => 'testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:24:36,877 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:36,877 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. service=AccessControlService 2024-12-02T04:24:36,877 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 6200c5eb2c04b1d4f3b9574ebc3b9096, NAME => 'testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:24:36,877 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
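The 'Registered coprocessor service ... service=AccessControlService' and 'System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded' lines reflect the secure mini cluster this test runs against. A hedged sketch of the configuration that loads AccessController; the property keys are standard HBase settings, but the test's own setup (via its testing utilities) may wire this differently:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SecureClusterConf {
  // Sketch only: enable authorization and register AccessController at master,
  // region and regionserver scope, which is what produces the coprocessor lines above.
  public static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}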
2024-12-02T04:24:36,877 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. service=AccessControlService 2024-12-02T04:24:36,877 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,877 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:36,877 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:36,878 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,878 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,878 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,878 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:36,878 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,878 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,879 INFO [StoreOpener-a8a75a2c72a1835b796320d54112331e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,880 INFO [StoreOpener-6200c5eb2c04b1d4f3b9574ebc3b9096-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,881 INFO [StoreOpener-6200c5eb2c04b1d4f3b9574ebc3b9096-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6200c5eb2c04b1d4f3b9574ebc3b9096 columnFamilyName cf 2024-12-02T04:24:36,881 INFO [StoreOpener-a8a75a2c72a1835b796320d54112331e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a8a75a2c72a1835b796320d54112331e columnFamilyName cf 2024-12-02T04:24:36,882 DEBUG [StoreOpener-a8a75a2c72a1835b796320d54112331e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:36,882 DEBUG [StoreOpener-6200c5eb2c04b1d4f3b9574ebc3b9096-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:36,883 INFO [StoreOpener-a8a75a2c72a1835b796320d54112331e-1 {}] regionserver.HStore(327): Store=a8a75a2c72a1835b796320d54112331e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:36,883 INFO [StoreOpener-6200c5eb2c04b1d4f3b9574ebc3b9096-1 {}] regionserver.HStore(327): Store=6200c5eb2c04b1d4f3b9574ebc3b9096/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:36,883 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,883 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,884 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,884 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,884 DEBUG 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,885 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,885 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,885 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,885 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,885 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,887 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,887 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,889 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:36,889 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:36,889 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened a8a75a2c72a1835b796320d54112331e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62603799, jitterRate=-0.06713069975376129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:36,889 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 6200c5eb2c04b1d4f3b9574ebc3b9096; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68834102, jitterRate=0.025708049535751343}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:36,889 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:36,889 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:36,890 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 6200c5eb2c04b1d4f3b9574ebc3b9096: Running coprocessor pre-open hook at 1733113476878Writing region info on filesystem at 1733113476878Initializing all the Stores at 1733113476879 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113476879Cleaning up temporary data from old regions at 1733113476885 (+6 ms)Running coprocessor post-open hooks at 1733113476889 (+4 ms)Region opened successfully at 1733113476890 (+1 ms) 2024-12-02T04:24:36,890 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for a8a75a2c72a1835b796320d54112331e: Running coprocessor pre-open hook at 1733113476878Writing region info on filesystem at 1733113476878Initializing all the Stores at 1733113476879 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113476879Cleaning up temporary data from old regions at 1733113476885 (+6 ms)Running coprocessor post-open hooks at 1733113476889 (+4 ms)Region opened successfully at 1733113476890 (+1 ms) 2024-12-02T04:24:36,891 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096., pid=66, masterSystemTime=1733113476869 2024-12-02T04:24:36,891 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e., pid=67, masterSystemTime=1733113476870 2024-12-02T04:24:36,892 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:36,892 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 
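Both OpenRegionProcedures have now reported 'Region opened successfully', and a few lines further down the client re-scans hbase:meta and finds 2 regions for the table. A small sketch of performing the same check from client code, assuming an open Connection named conn:

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionCheck {
  // Sketch only: waits for the table to be served, then lists its region locations,
  // mirroring the "Found 2 regions for table" check in the log below.
  public static void verifyRegions(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(100);
      }
    }
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("regions: " + locations.size()); // expected to be 2 here
    }
  }
}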
2024-12-02T04:24:36,893 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=a8a75a2c72a1835b796320d54112331e, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:36,893 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:36,893 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:36,894 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=6200c5eb2c04b1d4f3b9574ebc3b9096, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:36,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure a8a75a2c72a1835b796320d54112331e, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:24:36,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:24:36,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-12-02T04:24:36,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure a8a75a2c72a1835b796320d54112331e, server=e2eaa0f11f7e,41643,1733113342088 in 178 msec 2024-12-02T04:24:36,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-12-02T04:24:36,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096, server=e2eaa0f11f7e,34291,1733113342258 in 180 msec 2024-12-02T04:24:36,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, ASSIGN in 336 msec 2024-12-02T04:24:36,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-12-02T04:24:36,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, ASSIGN in 337 msec 2024-12-02T04:24:36,900 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:24:36,901 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113476900"}]},"ts":"1733113476900"} 2024-12-02T04:24:36,902 INFO [PEWorker-4 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-02T04:24:36,903 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:24:36,903 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-02T04:24:36,907 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T04:24:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:36,992 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:36,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:36,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:36,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:36,996 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 482 msec 2024-12-02T04:24:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T04:24:37,140 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T04:24:37,140 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): 
Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-02T04:24:37,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:37,145 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:37,147 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,154 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,161 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T04:24:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113477164 (current time:1733113477164). 
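The snapshot request '{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }' corresponds to a plain admin snapshot call against the still-empty table. A minimal sketch, assuming an open Connection named conn and using the Admin overload that takes a SnapshotType:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeSnapshot {
  // Sketch only: takes a FLUSH-type snapshot of the freshly created (still empty) table,
  // which is what gets stored as SnapshotProcedure pid=68 below.
  public static void takeEmptySnapshot(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportWithResetTtl", tn, SnapshotType.FLUSH);
    }
  }
}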
2024-12-02T04:24:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:24:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T04:24:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:24:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a6d1687, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:37,165 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:37,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:37,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:37,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d95670, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:37,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:37,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,167 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:37,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1907df76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:37,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:37,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:37,169 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59466, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:37,170 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:24:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,171 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:24:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a8d1a04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:37,172 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:37,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:37,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:37,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6619ef8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:37,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:37,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,173 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:37,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33526be1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:37,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:37,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:37,177 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
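The repeated 'Going to request e2eaa0f11f7e,34209,-1 for getting cluster id' / 'Stopping rpc client' pairs come from short-lived client connections opened and closed inside snapshot validation (see the AsyncConnectionImpl.close() call stacks above and below). For orientation, a sketch of the ordinary client lifecycle that produces this kind of logging; the quorum and port are copied from the ZKWatcher lines above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClientLifecycle {
  // Sketch only: opening a Connection triggers the cluster-id fetch and meta-location
  // lookups seen above; closing it logs "Stopping rpc client".
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62533"); // quorum=127.0.0.1:62533 in this run
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}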
2024-12-02T04:24:37,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:24:37,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:37,179 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47052, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:37,181 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:24:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,181 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T04:24:37,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:24:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T04:24:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-02T04:24:37,183 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:24:37,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T04:24:37,185 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:24:37,187 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:24:37,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741971_1147 (size=161) 2024-12-02T04:24:37,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741971_1147 (size=161) 2024-12-02T04:24:37,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741971_1147 (size=161) 2024-12-02T04:24:37,195 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:24:37,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096}] 2024-12-02T04:24:37,196 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,196 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T04:24:37,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-02T04:24:37,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-02T04:24:37,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:37,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for a8a75a2c72a1835b796320d54112331e: 2024-12-02T04:24:37,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:37,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-02T04:24:37,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 6200c5eb2c04b1d4f3b9574ebc3b9096: 2024-12-02T04:24:37,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-02T04:24:37,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-02T04:24:37,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:37,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:24:37,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-02T04:24:37,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:37,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:24:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741972_1148 (size=68) 2024-12-02T04:24:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741972_1148 (size=68) 2024-12-02T04:24:37,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741972_1148 (size=68) 2024-12-02T04:24:37,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741973_1149 (size=68) 2024-12-02T04:24:37,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741973_1149 (size=68) 2024-12-02T04:24:37,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:37,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741973_1149 (size=68) 2024-12-02T04:24:37,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-02T04:24:37,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-02T04:24:37,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 
2024-12-02T04:24:37,362 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-02T04:24:37,362 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-02T04:24:37,362 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,363 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e in 168 msec 2024-12-02T04:24:37,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-12-02T04:24:37,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096 in 168 msec 2024-12-02T04:24:37,365 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:24:37,366 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:24:37,367 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:24:37,367 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:24:37,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:37,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:24:37,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741974_1150 (size=60) 2024-12-02T04:24:37,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741974_1150 (size=60) 2024-12-02T04:24:37,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741974_1150 (size=60) 2024-12-02T04:24:37,376 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:24:37,376 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-02T04:24:37,376 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-02T04:24:37,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741975_1151 (size=641) 2024-12-02T04:24:37,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741975_1151 (size=641) 2024-12-02T04:24:37,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741975_1151 (size=641) 2024-12-02T04:24:37,390 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:24:37,395 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:24:37,395 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-02T04:24:37,397 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:24:37,397 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-02T04:24:37,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 215 msec 2024-12-02T04:24:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T04:24:37,498 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T04:24:37,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:24:37,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:24:37,507 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,510 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-02T04:24:37,510 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 
2024-12-02T04:24:37,510 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:37,513 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,518 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,525 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:37,528 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T04:24:37,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113477528 (current time:1733113477528). 2024-12-02T04:24:37,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:24:37,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T04:24:37,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:24:37,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@320789d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:37,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:37,530 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:37,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:37,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:37,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16e3ffae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-02T04:24:37,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:37,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:37,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,532 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35438, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:37,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68753cea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:37,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:37,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:37,534 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:37,536 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 
2024-12-02T04:24:37,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:37,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,536 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:37,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5df63789, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:37,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:37,538 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:37,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:37,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:37,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f6284f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:37,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:37,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,539 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:37,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13748d25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:37,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:37,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:37,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:37,543 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:37,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:24:37,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:37,546 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:37,547 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 
2024-12-02T04:24:37,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:37,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:37,547 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:37,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T04:24:37,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T04:24:37,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T04:24:37,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-02T04:24:37,550 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:24:37,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T04:24:37,551 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:24:37,554 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:24:37,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741976_1152 (size=156) 2024-12-02T04:24:37,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741976_1152 (size=156) 2024-12-02T04:24:37,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741976_1152 (size=156) 2024-12-02T04:24:37,566 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:24:37,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096}] 2024-12-02T04:24:37,567 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,567 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T04:24:37,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-02T04:24:37,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-02T04:24:37,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:37,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:37,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing a8a75a2c72a1835b796320d54112331e 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-02T04:24:37,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 6200c5eb2c04b1d4f3b9574ebc3b9096 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-02T04:24:37,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e is 71, key is 000b5bdb792766446a86adad790d8a89/cf:q/1733113477505/Put/seqid=0 2024-12-02T04:24:37,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096 is 71, key is 1414ab6f6639fa45e1db1d0dabddccc4/cf:q/1733113477506/Put/seqid=0 2024-12-02T04:24:37,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741977_1153 (size=5311) 2024-12-02T04:24:37,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741977_1153 (size=5311) 2024-12-02T04:24:37,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741977_1153 (size=5311) 2024-12-02T04:24:37,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:37,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741978_1154 (size=7961) 2024-12-02T04:24:37,749 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741978_1154 (size=7961) 2024-12-02T04:24:37,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741978_1154 (size=7961) 2024-12-02T04:24:37,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:37,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/.tmp/cf/0339cb667c7a438bb55b2513222837a3, store: [table=testtb-testExportWithResetTtl family=cf region=a8a75a2c72a1835b796320d54112331e] 2024-12-02T04:24:37,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/.tmp/cf/0339cb667c7a438bb55b2513222837a3 is 206, key is 0ae3e58070257dfa70ffb43378d6b5de0/cf:q/1733113477505/Put/seqid=0 2024-12-02T04:24:37,755 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741979_1155 (size=6510) 2024-12-02T04:24:37,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741979_1155 (size=6510) 2024-12-02T04:24:37,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741979_1155 (size=6510) 2024-12-02T04:24:37,756 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush 
store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/.tmp/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0, store: [table=testtb-testExportWithResetTtl family=cf region=6200c5eb2c04b1d4f3b9574ebc3b9096] 2024-12-02T04:24:37,756 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/.tmp/cf/0339cb667c7a438bb55b2513222837a3 2024-12-02T04:24:37,756 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/.tmp/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 is 206, key is 1509c4a106289757327f19ea8edba24e6/cf:q/1733113477506/Put/seqid=0 2024-12-02T04:24:37,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741980_1156 (size=14247) 2024-12-02T04:24:37,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741980_1156 (size=14247) 2024-12-02T04:24:37,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741980_1156 (size=14247) 2024-12-02T04:24:37,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/.tmp/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 2024-12-02T04:24:37,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/.tmp/cf/0339cb667c7a438bb55b2513222837a3 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf/0339cb667c7a438bb55b2513222837a3 2024-12-02T04:24:37,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf/0339cb667c7a438bb55b2513222837a3, entries=6, sequenceid=6, filesize=6.4 K 2024-12-02T04:24:37,771 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for a8a75a2c72a1835b796320d54112331e in 50ms, sequenceid=6, compaction requested=false 2024-12-02T04:24:37,771 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-02T04:24:37,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for a8a75a2c72a1835b796320d54112331e: 2024-12-02T04:24:37,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. for snaptb0-testExportWithResetTtl completed. 2024-12-02T04:24:37,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:37,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf/0339cb667c7a438bb55b2513222837a3] hfiles 2024-12-02T04:24:37,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf/0339cb667c7a438bb55b2513222837a3 for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,773 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000004/launch_container.sh] 2024-12-02T04:24:37,773 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000004/container_tokens] 2024-12-02T04:24:37,773 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000004/sysfs] 2024-12-02T04:24:37,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/.tmp/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 2024-12-02T04:24:37,784 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0, entries=44, sequenceid=6, filesize=13.9 K 2024-12-02T04:24:37,785 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 6200c5eb2c04b1d4f3b9574ebc3b9096 in 63ms, sequenceid=6, compaction requested=false 2024-12-02T04:24:37,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 6200c5eb2c04b1d4f3b9574ebc3b9096: 2024-12-02T04:24:37,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. for snaptb0-testExportWithResetTtl completed. 2024-12-02T04:24:37,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:37,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0] hfiles 2024-12-02T04:24:37,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741981_1157 (size=107) 2024-12-02T04:24:37,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741981_1157 (size=107) 2024-12-02T04:24:37,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741981_1157 (size=107) 2024-12-02T04:24:37,791 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 
2024-12-02T04:24:37,791 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-02T04:24:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-02T04:24:37,792 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,792 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a8a75a2c72a1835b796320d54112331e in 227 msec 2024-12-02T04:24:37,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741982_1158 (size=107) 2024-12-02T04:24:37,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741982_1158 (size=107) 2024-12-02T04:24:37,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741982_1158 (size=107) 2024-12-02T04:24:37,797 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 
2024-12-02T04:24:37,797 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-02T04:24:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-02T04:24:37,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,797 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-02T04:24:37,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096 in 232 msec 2024-12-02T04:24:37,799 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:24:37,800 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:24:37,801 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:24:37,801 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:24:37,801 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:37,802 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e] hfiles 2024-12-02T04:24:37,802 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:37,802 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:37,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741983_1159 (size=291) 2024-12-02T04:24:37,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741983_1159 (size=291) 2024-12-02T04:24:37,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741983_1159 (size=291) 2024-12-02T04:24:37,810 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:24:37,810 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,811 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741984_1160 (size=951) 2024-12-02T04:24:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741984_1160 (size=951) 2024-12-02T04:24:37,825 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741984_1160 (size=951) 2024-12-02T04:24:37,827 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:24:37,834 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:24:37,834 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-02T04:24:37,836 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:24:37,836 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-02T04:24:37,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 288 msec 2024-12-02T04:24:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T04:24:37,868 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T04:24:37,871 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:24:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:37,875 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:24:37,875 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-02T04:24:37,876 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:24:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T04:24:37,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741985_1161 (size=433) 2024-12-02T04:24:37,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741985_1161 (size=433) 2024-12-02T04:24:37,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741985_1161 (size=433) 2024-12-02T04:24:37,888 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 90022892cad5d7e9e79d6624360d1d9b, NAME => 'testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:37,890 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 626e614376b459e36f1c353808824884, NAME => 'testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741986_1162 (size=58) 2024-12-02T04:24:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741986_1162 (size=58) 2024-12-02T04:24:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741987_1163 (size=58) 2024-12-02T04:24:37,896 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741986_1162 (size=58) 2024-12-02T04:24:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741987_1163 (size=58) 2024-12-02T04:24:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741987_1163 (size=58) 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 90022892cad5d7e9e79d6624360d1d9b, disabling compactions & flushes 2024-12-02T04:24:37,897 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. after waiting 0 ms 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:37,897 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 90022892cad5d7e9e79d6624360d1d9b: Waiting for close lock at 1733113477897Disabling compacts and flushes for region at 1733113477897Disabling writes for close at 1733113477897Writing region close event to WAL at 1733113477897Closed at 1733113477897 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 626e614376b459e36f1c353808824884, disabling compactions & flushes 2024-12-02T04:24:37,897 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 
after waiting 0 ms 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:37,897 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:37,897 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 626e614376b459e36f1c353808824884: Waiting for close lock at 1733113477897Disabling compacts and flushes for region at 1733113477897Disabling writes for close at 1733113477897Writing region close event to WAL at 1733113477897Closed at 1733113477897 2024-12-02T04:24:37,898 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:24:37,899 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733113477898"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113477898"}]},"ts":"1733113477898"} 2024-12-02T04:24:37,899 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733113477898"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113477898"}]},"ts":"1733113477898"} 2024-12-02T04:24:37,901 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
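The HMaster create request logged above carries the complete table descriptor: a single MOB-enabled family 'cf' with MOB_THRESHOLD => '0' and VERSIONS => '1', plus one split key ('1') that yields the two regions initialized next. As a rough client-side sketch only (the class name, the default-configuration connection, and the lack of error handling are assumptions, not taken from this test), an equivalent request through the public HBase Admin API could look like:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Family 'cf' as described in the logged descriptor: MOB enabled with
          // threshold 0 and a single version; other attributes stay at defaults.
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0L)
                  .setMaxVersions(1)
                  .build());
          // One split key ('1') produces the two regions seen above: [,1) and [1,).
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(table.build(), splitKeys);
        }
      }
    }

A MOB threshold of 0 pushes effectively every cell of 'cf' into MOB files, which is consistent with the mobdir hfile references seen in the earlier snapshot manifest.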
2024-12-02T04:24:37,902 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:24:37,902 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113477902"}]},"ts":"1733113477902"} 2024-12-02T04:24:37,903 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-02T04:24:37,903 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:24:37,905 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:24:37,905 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:24:37,905 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:24:37,905 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:24:37,905 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, ASSIGN}] 2024-12-02T04:24:37,907 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, ASSIGN 2024-12-02T04:24:37,907 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, ASSIGN 2024-12-02T04:24:37,908 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:24:37,908 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:24:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T04:24:38,058 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T04:24:38,059 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=90022892cad5d7e9e79d6624360d1d9b, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:38,059 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=626e614376b459e36f1c353808824884, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, ASSIGN because future has completed 2024-12-02T04:24:38,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 626e614376b459e36f1c353808824884, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:24:38,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, ASSIGN because future has completed 2024-12-02T04:24:38,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90022892cad5d7e9e79d6624360d1d9b, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:24:38,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T04:24:38,223 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:38,224 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 626e614376b459e36f1c353808824884, NAME => 'testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:24:38,224 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 
2024-12-02T04:24:38,224 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 90022892cad5d7e9e79d6624360d1d9b, NAME => 'testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. service=AccessControlService 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. service=AccessControlService 2024-12-02T04:24:38,225 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:38,225 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,225 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,227 INFO [StoreOpener-626e614376b459e36f1c353808824884-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,227 INFO [StoreOpener-90022892cad5d7e9e79d6624360d1d9b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,228 INFO [StoreOpener-626e614376b459e36f1c353808824884-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 626e614376b459e36f1c353808824884 columnFamilyName cf 2024-12-02T04:24:38,228 INFO [StoreOpener-90022892cad5d7e9e79d6624360d1d9b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90022892cad5d7e9e79d6624360d1d9b columnFamilyName cf 2024-12-02T04:24:38,229 DEBUG [StoreOpener-90022892cad5d7e9e79d6624360d1d9b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:38,229 DEBUG [StoreOpener-626e614376b459e36f1c353808824884-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:38,229 INFO [StoreOpener-90022892cad5d7e9e79d6624360d1d9b-1 {}] regionserver.HStore(327): Store=90022892cad5d7e9e79d6624360d1d9b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:38,229 INFO [StoreOpener-626e614376b459e36f1c353808824884-1 {}] regionserver.HStore(327): Store=626e614376b459e36f1c353808824884/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:38,230 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,230 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, 
pid=78}] regionserver.HRegion(1038): replaying wal for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,230 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,231 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,233 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,233 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,235 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:38,235 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:38,235 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 90022892cad5d7e9e79d6624360d1d9b; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72100693, jitterRate=0.07438404858112335}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:38,235 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 626e614376b459e36f1c353808824884; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70598453, jitterRate=0.05199892818927765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:38,235 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:38,235 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 626e614376b459e36f1c353808824884 2024-12-02T04:24:38,236 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 90022892cad5d7e9e79d6624360d1d9b: Running coprocessor pre-open hook at 1733113478225Writing region info on filesystem at 1733113478225Initializing all the Stores at 1733113478226 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113478226Cleaning up temporary data from old regions at 1733113478231 (+5 ms)Running coprocessor post-open hooks at 1733113478235 (+4 ms)Region opened successfully at 1733113478236 (+1 ms) 2024-12-02T04:24:38,236 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 626e614376b459e36f1c353808824884: Running coprocessor pre-open hook at 1733113478225Writing region info on filesystem at 1733113478225Initializing all the Stores at 1733113478226 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113478226Cleaning up temporary data from old regions at 1733113478231 (+5 ms)Running coprocessor post-open hooks at 1733113478235 (+4 ms)Region opened successfully at 1733113478236 (+1 ms) 2024-12-02T04:24:38,236 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b., pid=78, masterSystemTime=1733113478217 2024-12-02T04:24:38,236 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884., pid=77, masterSystemTime=1733113478216 
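After both OpenRegionProcedures report back below, the table's two regions are live on e2eaa0f11f7e,37413 and e2eaa0f11f7e,34291. A minimal sketch of confirming that layout from a client (the class name and default-configuration connection are illustrative assumptions):

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLayoutCheckSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
          // Expect two locations, with start keys '' and '1', matching the ASSIGN
          // procedures and the regionLocation values recorded in hbase:meta above.
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }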
2024-12-02T04:24:38,238 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:38,238 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:38,239 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=90022892cad5d7e9e79d6624360d1d9b, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:38,239 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:38,239 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:38,239 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=626e614376b459e36f1c353808824884, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:38,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90022892cad5d7e9e79d6624360d1d9b, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:24:38,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 626e614376b459e36f1c353808824884, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:24:38,243 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-12-02T04:24:38,243 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 90022892cad5d7e9e79d6624360d1d9b, server=e2eaa0f11f7e,37413,1733113342217 in 176 msec 2024-12-02T04:24:38,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-12-02T04:24:38,244 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, ASSIGN in 338 msec 2024-12-02T04:24:38,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 626e614376b459e36f1c353808824884, server=e2eaa0f11f7e,34291,1733113342258 in 179 msec 2024-12-02T04:24:38,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-02T04:24:38,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, ASSIGN in 339 msec 2024-12-02T04:24:38,247 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, 
state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:24:38,247 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113478247"}]},"ts":"1733113478247"} 2024-12-02T04:24:38,249 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-02T04:24:38,250 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:24:38,250 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-02T04:24:38,254 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T04:24:38,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:38,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:38,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:38,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:38,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:38,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 969 msec 2024-12-02T04:24:39,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T04:24:39,019 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-02T04:24:39,019 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,026 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-02T04:24:39,026 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:39,026 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:39,028 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,033 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,038 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:24:39,046 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. with WAL disabled. Data may be lost in the event of a crash. 
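The two HRegion(8528) messages above come from test writes issued with WAL durability turned off. A minimal sketch of a put that exercises that code path (the row key, qualifier, and value below are placeholders, not the test's actual data):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          // Durability.SKIP_WAL is what produces the "with WAL disabled" warning:
          // the edit is not journaled, so it can be lost if the region server
          // crashes before the memstore is flushed.
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
              .setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }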
2024-12-02T04:24:39,048 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,050 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-02T04:24:39,050 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:39,050 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:39,052 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,056 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,060 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T04:24:39,074 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-02T04:24:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113479074 (current time:1733113479074). 
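The MasterRpcServices(1763) entry above is the client-side request for a FLUSH-type snapshot with a TTL of 100000. A minimal sketch of issuing such a request through the Admin API follows; it uses the plain three-argument snapshot call, and the TTL seen in the log would additionally be supplied as a snapshot property (for example {TTL => 100000} in the HBase shell) on versions that support snapshot TTLs — treat that extra detail as an assumption here, not something this sketch performs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot like the one requested above; the call blocks
          // until the master-side SnapshotProcedure reports completion.
          admin.snapshot("snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }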
2024-12-02T04:24:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T04:24:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:24:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1992883c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:39,076 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:39,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:39,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:39,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543ecace, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:39,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:39,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:39,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:39,078 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:39,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19b1f1fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:39,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:39,080 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:39,081 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:39,082 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:24:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:24:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:39,082 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:24:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@761d9e93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:24:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:24:39,084 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:24:39,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:24:39,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:24:39,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f8f77cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:39,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:24:39,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:24:39,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:39,085 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:24:39,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a5ebb8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:24:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:24:39,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:24:39,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:39,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59510, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:39,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:24:39,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:24:39,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:24:39,092 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:24:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-02T04:24:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:24:39,092 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:24:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T04:24:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:24:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-02T04:24:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-02T04:24:39,095 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:24:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T04:24:39,096 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:24:39,098 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:24:39,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741988_1164 (size=143) 2024-12-02T04:24:39,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741988_1164 (size=143) 2024-12-02T04:24:39,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741988_1164 (size=143) 2024-12-02T04:24:39,106 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={
ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:24:39,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90022892cad5d7e9e79d6624360d1d9b}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 626e614376b459e36f1c353808824884}] 2024-12-02T04:24:39,107 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:39,107 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 626e614376b459e36f1c353808824884 2024-12-02T04:24:39,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T04:24:39,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-02T04:24:39,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-02T04:24:39,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:39,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 
2024-12-02T04:24:39,259 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 90022892cad5d7e9e79d6624360d1d9b 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T04:24:39,259 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 626e614376b459e36f1c353808824884 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T04:24:39,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b is 71, key is 004e4d7d69db54f6a4d52970d01cd76f/cf:q/1733113479045/Put/seqid=0 2024-12-02T04:24:39,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884 is 71, key is 220188229b9afd616a7d0f387bd943e6/cf:q/1733113479046/Put/seqid=0 2024-12-02T04:24:39,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741990_1166 (size=8102) 2024-12-02T04:24:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741990_1166 (size=8102) 2024-12-02T04:24:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741990_1166 (size=8102) 2024-12-02T04:24:39,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:39,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884 2024-12-02T04:24:39,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/.tmp/cf/3b15607a96084a2bbe65ff60e830e7b3, store: [table=testExportWithResetTtl family=cf region=626e614376b459e36f1c353808824884] 2024-12-02T04:24:39,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/.tmp/cf/3b15607a96084a2bbe65ff60e830e7b3 is 199, key is 1ae3e0d34b155ae6ed856088bd989df85/cf:q/1733113479046/Put/seqid=0 2024-12-02T04:24:39,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741989_1165 (size=5172) 2024-12-02T04:24:39,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741989_1165 (size=5172) 2024-12-02T04:24:39,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741989_1165 (size=5172) 2024-12-02T04:24:39,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:39,311 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:39,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/.tmp/cf/4fb16792ec594e288c194c3d784072f4, store: [table=testExportWithResetTtl family=cf region=90022892cad5d7e9e79d6624360d1d9b] 2024-12-02T04:24:39,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/.tmp/cf/4fb16792ec594e288c194c3d784072f4 is 199, key is 0d70bfb8881d8ad4041a8d5cadaaff989/cf:q/1733113479045/Put/seqid=0 2024-12-02T04:24:39,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741991_1167 (size=14324) 2024-12-02T04:24:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741991_1167 (size=14324) 2024-12-02T04:24:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741991_1167 (size=14324) 2024-12-02T04:24:39,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/.tmp/cf/3b15607a96084a2bbe65ff60e830e7b3 2024-12-02T04:24:39,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/.tmp/cf/3b15607a96084a2bbe65ff60e830e7b3 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf/3b15607a96084a2bbe65ff60e830e7b3 2024-12-02T04:24:39,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741992_1168 (size=6073) 2024-12-02T04:24:39,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741992_1168 (size=6073) 2024-12-02T04:24:39,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741992_1168 (size=6073) 2024-12-02T04:24:39,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/.tmp/cf/4fb16792ec594e288c194c3d784072f4 2024-12-02T04:24:39,332 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf/3b15607a96084a2bbe65ff60e830e7b3, entries=46, sequenceid=5, filesize=14.0 K 2024-12-02T04:24:39,333 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 626e614376b459e36f1c353808824884 in 73ms, sequenceid=5, compaction requested=false 2024-12-02T04:24:39,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-02T04:24:39,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 626e614376b459e36f1c353808824884: 2024-12-02T04:24:39,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. for snaptb-testExportWithResetTtl completed. 2024-12-02T04:24:39,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-02T04:24:39,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:39,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf/3b15607a96084a2bbe65ff60e830e7b3] hfiles 2024-12-02T04:24:39,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf/3b15607a96084a2bbe65ff60e830e7b3 for snapshot=snaptb-testExportWithResetTtl 2024-12-02T04:24:39,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/.tmp/cf/4fb16792ec594e288c194c3d784072f4 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf/4fb16792ec594e288c194c3d784072f4 2024-12-02T04:24:39,340 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf/4fb16792ec594e288c194c3d784072f4, entries=4, sequenceid=5, filesize=5.9 K 2024-12-02T04:24:39,341 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 90022892cad5d7e9e79d6624360d1d9b in 82ms, sequenceid=5, compaction requested=false 2024-12-02T04:24:39,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 90022892cad5d7e9e79d6624360d1d9b: 2024-12-02T04:24:39,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. for snaptb-testExportWithResetTtl completed. 2024-12-02T04:24:39,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-02T04:24:39,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:24:39,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf/4fb16792ec594e288c194c3d784072f4] hfiles 2024-12-02T04:24:39,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf/4fb16792ec594e288c194c3d784072f4 for snapshot=snaptb-testExportWithResetTtl 2024-12-02T04:24:39,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741993_1169 (size=100) 2024-12-02T04:24:39,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741993_1169 (size=100) 2024-12-02T04:24:39,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741993_1169 (size=100) 2024-12-02T04:24:39,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 
2024-12-02T04:24:39,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-02T04:24:39,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-02T04:24:39,345 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 626e614376b459e36f1c353808824884 2024-12-02T04:24:39,346 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 626e614376b459e36f1c353808824884 2024-12-02T04:24:39,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 626e614376b459e36f1c353808824884 in 240 msec 2024-12-02T04:24:39,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741994_1170 (size=100) 2024-12-02T04:24:39,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741994_1170 (size=100) 2024-12-02T04:24:39,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741994_1170 (size=100) 2024-12-02T04:24:39,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 
2024-12-02T04:24:39,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-02T04:24:39,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-02T04:24:39,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:39,358 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:39,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-12-02T04:24:39,360 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:24:39,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 90022892cad5d7e9e79d6624360d1d9b in 253 msec 2024-12-02T04:24:39,361 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:24:39,362 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:24:39,362 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:24:39,363 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:39,365 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b] hfiles 2024-12-02T04:24:39,365 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884 2024-12-02T04:24:39,365 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741995_1171 (size=284) 2024-12-02T04:24:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741995_1171 (size=284) 2024-12-02T04:24:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741995_1171 (size=284) 2024-12-02T04:24:39,372 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:24:39,373 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-02T04:24:39,374 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T04:24:39,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741996_1172 (size=923) 2024-12-02T04:24:39,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741996_1172 (size=923) 2024-12-02T04:24:39,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40371 is added to blk_1073741996_1172 (size=923) 2024-12-02T04:24:39,391 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:24:39,397 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:24:39,397 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T04:24:39,398 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:24:39,399 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-02T04:24:39,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 306 msec 2024-12-02T04:24:39,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T04:24:39,408 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-02T04:24:39,417 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416 2024-12-02T04:24:39,417 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:39,444 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:39,444 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T04:24:39,445 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:24:39,450 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T04:24:39,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741997_1173 (size=143) 2024-12-02T04:24:39,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741997_1173 (size=143) 2024-12-02T04:24:39,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741997_1173 (size=143) 2024-12-02T04:24:39,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741998_1174 (size=923) 2024-12-02T04:24:39,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741998_1174 (size=923) 2024-12-02T04:24:39,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741998_1174 (size=923) 2024-12-02T04:24:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741999_1175 (size=141) 2024-12-02T04:24:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741999_1175 (size=141) 2024-12-02T04:24:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741999_1175 (size=141) 2024-12-02T04:24:39,484 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:39,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:39,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-15119946839931586383.jar 2024-12-02T04:24:40,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,393 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0002_000001 (auth:SIMPLE) from 127.0.0.1:58162 2024-12-02T04:24:40,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000001/launch_container.sh] 2024-12-02T04:24:40,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000001/container_tokens] 2024-12-02T04:24:40,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0002/container_1733113348655_0002_01_000001/sysfs] 2024-12-02T04:24:40,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-13028632344745663738.jar 2024-12-02T04:24:40,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:24:40,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:24:40,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:24:40,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:24:40,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:24:40,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:24:40,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:24:40,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:24:40,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:24:40,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:24:40,425 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:24:40,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:24:40,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:40,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:40,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:24:40,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:40,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:24:40,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:24:40,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:24:40,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742000_1176 (size=131440) 2024-12-02T04:24:40,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742000_1176 (size=131440) 2024-12-02T04:24:40,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742000_1176 (size=131440) 2024-12-02T04:24:40,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742001_1177 (size=4188619) 2024-12-02T04:24:40,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38235 is added to blk_1073742001_1177 (size=4188619) 2024-12-02T04:24:40,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742001_1177 (size=4188619) 2024-12-02T04:24:40,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T04:24:40,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T04:24:40,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T04:24:40,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742003_1179 (size=903927) 2024-12-02T04:24:40,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742003_1179 (size=903927) 2024-12-02T04:24:40,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742003_1179 (size=903927) 2024-12-02T04:24:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742004_1180 (size=8360083) 2024-12-02T04:24:40,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742004_1180 (size=8360083) 2024-12-02T04:24:40,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742004_1180 (size=8360083) 2024-12-02T04:24:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742005_1181 (size=6424732) 2024-12-02T04:24:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742005_1181 (size=6424732) 2024-12-02T04:24:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742005_1181 (size=6424732) 2024-12-02T04:24:40,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742006_1182 (size=1877034) 2024-12-02T04:24:40,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742006_1182 (size=1877034) 2024-12-02T04:24:40,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742006_1182 (size=1877034) 2024-12-02T04:24:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742007_1183 (size=77835) 2024-12-02T04:24:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742007_1183 (size=77835) 2024-12-02T04:24:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742007_1183 (size=77835) 2024-12-02T04:24:40,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38235 is added to blk_1073742008_1184 (size=30949) 2024-12-02T04:24:40,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742008_1184 (size=30949) 2024-12-02T04:24:40,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742008_1184 (size=30949) 2024-12-02T04:24:40,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742009_1185 (size=1597213) 2024-12-02T04:24:40,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742009_1185 (size=1597213) 2024-12-02T04:24:40,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742009_1185 (size=1597213) 2024-12-02T04:24:40,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742010_1186 (size=440957) 2024-12-02T04:24:40,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742010_1186 (size=440957) 2024-12-02T04:24:40,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742010_1186 (size=440957) 2024-12-02T04:24:40,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742011_1187 (size=4695811) 2024-12-02T04:24:40,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742011_1187 (size=4695811) 2024-12-02T04:24:40,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742011_1187 (size=4695811) 2024-12-02T04:24:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742012_1188 (size=232957) 2024-12-02T04:24:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742012_1188 (size=232957) 2024-12-02T04:24:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742012_1188 (size=232957) 2024-12-02T04:24:40,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742013_1189 (size=127628) 2024-12-02T04:24:40,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742013_1189 (size=127628) 2024-12-02T04:24:40,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742013_1189 (size=127628) 2024-12-02T04:24:40,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742014_1190 (size=20406) 2024-12-02T04:24:40,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742014_1190 (size=20406) 2024-12-02T04:24:40,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742014_1190 (size=20406) 2024-12-02T04:24:41,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742015_1191 (size=5175431) 2024-12-02T04:24:41,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742015_1191 (size=5175431) 2024-12-02T04:24:41,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742015_1191 (size=5175431) 2024-12-02T04:24:41,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742016_1192 (size=217634) 2024-12-02T04:24:41,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742016_1192 (size=217634) 2024-12-02T04:24:41,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742016_1192 (size=217634) 2024-12-02T04:24:41,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742017_1193 (size=1832290) 2024-12-02T04:24:41,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742017_1193 (size=1832290) 2024-12-02T04:24:41,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742017_1193 (size=1832290) 2024-12-02T04:24:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742018_1194 (size=322274) 2024-12-02T04:24:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742018_1194 (size=322274) 2024-12-02T04:24:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742018_1194 (size=322274) 2024-12-02T04:24:41,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742019_1195 (size=503880) 2024-12-02T04:24:41,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742019_1195 (size=503880) 2024-12-02T04:24:41,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742019_1195 (size=503880) 2024-12-02T04:24:41,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742020_1196 (size=29229) 2024-12-02T04:24:41,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742020_1196 (size=29229) 2024-12-02T04:24:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742020_1196 (size=29229) 2024-12-02T04:24:41,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742021_1197 (size=24096) 2024-12-02T04:24:41,143 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742021_1197 (size=24096) 2024-12-02T04:24:41,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742021_1197 (size=24096) 2024-12-02T04:24:41,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742022_1198 (size=111872) 2024-12-02T04:24:41,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742022_1198 (size=111872) 2024-12-02T04:24:41,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742022_1198 (size=111872) 2024-12-02T04:24:41,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742023_1199 (size=45609) 2024-12-02T04:24:41,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742023_1199 (size=45609) 2024-12-02T04:24:41,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742023_1199 (size=45609) 2024-12-02T04:24:41,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742024_1200 (size=136454) 2024-12-02T04:24:41,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742024_1200 (size=136454) 2024-12-02T04:24:41,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742024_1200 (size=136454) 2024-12-02T04:24:41,196 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-02T04:24:41,199 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-02T04:24:41,202 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.0 K 2024-12-02T04:24:41,202 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-02T04:24:41,202 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.9 K 2024-12-02T04:24:41,202 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-02T04:24:41,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742025_1201 (size=995) 2024-12-02T04:24:41,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742025_1201 (size=995) 2024-12-02T04:24:41,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742025_1201 (size=995) 2024-12-02T04:24:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742026_1202 (size=35) 2024-12-02T04:24:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742026_1202 (size=35) 2024-12-02T04:24:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742026_1202 (size=35) 2024-12-02T04:24:41,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742027_1203 (size=303994) 2024-12-02T04:24:41,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742027_1203 (size=303994) 2024-12-02T04:24:41,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742027_1203 (size=303994) 2024-12-02T04:24:41,275 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:24:41,276 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:24:41,395 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:58176 2024-12-02T04:24:41,740 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:24:41,783 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-02T04:24:41,784 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-02T04:24:41,784 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-02T04:24:41,784 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-02T04:24:41,785 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-02T04:24:46,131 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:38768 2024-12-02T04:24:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742028_1204 (size=349692) 2024-12-02T04:24:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742028_1204 (size=349692) 2024-12-02T04:24:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742028_1204 (size=349692) 2024-12-02T04:24:47,287 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:24:48,401 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:40486 2024-12-02T04:24:48,401 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:50140 2024-12-02T04:24:49,241 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:50146 2024-12-02T04:24:49,241 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:40488 2024-12-02T04:24:50,562 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
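For orientation, the records above show the export of 'snaptb-testExportWithResetTtl' apparently running as MapReduce job application_1733113348655_0003 on the MiniMRCluster. A hedged sketch of how ExportSnapshot is typically driven as a Hadoop Tool follows; the destination URI and mapper count are illustrative, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the command-line form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl -copy-to <dest> -mappers 4
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://localhost:34539/user/jenkins/export-example", // illustrative destination
            "-mappers", "4"
        });
        System.exit(rc);
      }
    }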
2024-12-02T04:24:51,609 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:24:53,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742029_1205 (size=14324) 2024-12-02T04:24:53,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742029_1205 (size=14324) 2024-12-02T04:24:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742029_1205 (size=14324) 2024-12-02T04:24:53,713 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000002/launch_container.sh] 2024-12-02T04:24:53,713 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000002/container_tokens] 2024-12-02T04:24:53,713 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000002/sysfs] 2024-12-02T04:24:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742031_1207 (size=5172) 2024-12-02T04:24:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742031_1207 (size=5172) 2024-12-02T04:24:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742031_1207 (size=5172) 2024-12-02T04:24:56,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742032_1208 (size=8102) 2024-12-02T04:24:56,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742032_1208 (size=8102) 2024-12-02T04:24:56,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742032_1208 (size=8102) 2024-12-02T04:24:56,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742033_1209 (size=6073) 2024-12-02T04:24:56,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742033_1209 (size=6073) 2024-12-02T04:24:56,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to 
blk_1073742033_1209 (size=6073) 2024-12-02T04:24:56,289 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000003/launch_container.sh] 2024-12-02T04:24:56,289 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000003/container_tokens] 2024-12-02T04:24:56,289 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000003/sysfs] 2024-12-02T04:24:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742030_1206 (size=31704) 2024-12-02T04:24:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742030_1206 (size=31704) 2024-12-02T04:24:56,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742030_1206 (size=31704) 2024-12-02T04:24:56,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742034_1210 (size=462) 2024-12-02T04:24:56,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742034_1210 (size=462) 2024-12-02T04:24:56,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742034_1210 (size=462) 2024-12-02T04:24:56,387 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000004/launch_container.sh] 2024-12-02T04:24:56,387 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000004/container_tokens] 2024-12-02T04:24:56,387 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000004/sysfs] 2024-12-02T04:24:56,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742035_1211 (size=31704) 2024-12-02T04:24:56,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742035_1211 (size=31704) 2024-12-02T04:24:56,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742035_1211 (size=31704) 2024-12-02T04:24:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742036_1212 (size=349692) 2024-12-02T04:24:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742036_1212 (size=349692) 2024-12-02T04:24:56,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742036_1212 (size=349692) 2024-12-02T04:24:56,432 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:56256 2024-12-02T04:24:58,495 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:24:58,496 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
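The verification that follows lists the exported snapshot layout (.snapshotinfo and data.manifest under .hbase-snapshot/snaptb-testExportWithResetTtl). A hedged sketch of the same listing using the plain HDFS FileSystem API; the root path is copied from the log, the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListExportedSnapshot {
      public static void main(String[] args) throws Exception {
        Path root = new Path("hdfs://localhost:34539/user/jenkins/test-data/"
            + "0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl");
        FileSystem fs = root.getFileSystem(new Configuration());
        // Recursively list files; a complete export contains at least
        // .snapshotinfo and data.manifest.
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }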
2024-12-02T04:24:58,516 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-02T04:24:58,516 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:24:58,516 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:24:58,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T04:24:58,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-02T04:24:58,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-02T04:24:58,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T04:24:58,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-02T04:24:58,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113479416/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-02T04:24:58,524 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-02T04:24:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T04:24:58,527 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113498527"}]},"ts":"1733113498527"} 2024-12-02T04:24:58,530 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-02T04:24:58,530 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-02T04:24:58,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-02T04:24:58,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, UNASSIGN}] 2024-12-02T04:24:58,533 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, UNASSIGN 2024-12-02T04:24:58,533 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, UNASSIGN 2024-12-02T04:24:58,533 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=626e614376b459e36f1c353808824884, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:58,533 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=90022892cad5d7e9e79d6624360d1d9b, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:24:58,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, UNASSIGN because future has completed 2024-12-02T04:24:58,535 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:58,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90022892cad5d7e9e79d6624360d1d9b, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:24:58,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, UNASSIGN because future has completed 2024-12-02T04:24:58,537 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:58,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 626e614376b459e36f1c353808824884, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:24:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T04:24:58,690 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:58,690 DEBUG 
[RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:58,690 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 90022892cad5d7e9e79d6624360d1d9b, disabling compactions & flushes 2024-12-02T04:24:58,690 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 626e614376b459e36f1c353808824884 2024-12-02T04:24:58,691 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. after waiting 0 ms 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 626e614376b459e36f1c353808824884, disabling compactions & flushes 2024-12-02T04:24:58,691 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. after waiting 0 ms 2024-12-02T04:24:58,691 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 
2024-12-02T04:24:58,707 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T04:24:58,707 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T04:24:58,709 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:58,709 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b. 2024-12-02T04:24:58,709 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 90022892cad5d7e9e79d6624360d1d9b: Waiting for close lock at 1733113498690Running coprocessor pre-close hooks at 1733113498690Disabling compacts and flushes for region at 1733113498690Disabling writes for close at 1733113498691 (+1 ms)Writing region close event to WAL at 1733113498701 (+10 ms)Running coprocessor post-close hooks at 1733113498709 (+8 ms)Closed at 1733113498709 2024-12-02T04:24:58,710 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:58,710 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884. 
2024-12-02T04:24:58,710 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 626e614376b459e36f1c353808824884: Waiting for close lock at 1733113498691Running coprocessor pre-close hooks at 1733113498691Disabling compacts and flushes for region at 1733113498691Disabling writes for close at 1733113498691Writing region close event to WAL at 1733113498702 (+11 ms)Running coprocessor post-close hooks at 1733113498710 (+8 ms)Closed at 1733113498710 2024-12-02T04:24:58,716 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:58,717 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 626e614376b459e36f1c353808824884 2024-12-02T04:24:58,718 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=90022892cad5d7e9e79d6624360d1d9b, regionState=CLOSED 2024-12-02T04:24:58,720 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=626e614376b459e36f1c353808824884, regionState=CLOSED 2024-12-02T04:24:58,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90022892cad5d7e9e79d6624360d1d9b, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:24:58,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 626e614376b459e36f1c353808824884, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:24:58,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-02T04:24:58,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 626e614376b459e36f1c353808824884, server=e2eaa0f11f7e,34291,1733113342258 in 188 msec 2024-12-02T04:24:58,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-02T04:24:58,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 90022892cad5d7e9e79d6624360d1d9b, server=e2eaa0f11f7e,37413,1733113342217 in 190 msec 2024-12-02T04:24:58,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=626e614376b459e36f1c353808824884, UNASSIGN in 198 msec 2024-12-02T04:24:58,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=84, resume processing ppid=83 2024-12-02T04:24:58,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=90022892cad5d7e9e79d6624360d1d9b, UNASSIGN in 204 msec 2024-12-02T04:24:58,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-02T04:24:58,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 210 msec 2024-12-02T04:24:58,750 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113498749"}]},"ts":"1733113498749"} 2024-12-02T04:24:58,753 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-02T04:24:58,753 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-02T04:24:58,759 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 230 msec 2024-12-02T04:24:58,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T04:24:58,848 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-02T04:24:58,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-02T04:24:58,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,850 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-02T04:24:58,851 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,855 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-02T04:24:58,856 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:58,856 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884 2024-12-02T04:24:58,858 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/recovered.edits] 2024-12-02T04:24:58,858 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/recovered.edits] 2024-12-02T04:24:58,861 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf/3b15607a96084a2bbe65ff60e830e7b3 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/cf/3b15607a96084a2bbe65ff60e830e7b3 2024-12-02T04:24:58,862 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf/4fb16792ec594e288c194c3d784072f4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/cf/4fb16792ec594e288c194c3d784072f4 2024-12-02T04:24:58,864 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/recovered.edits/8.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b/recovered.edits/8.seqid 2024-12-02T04:24:58,864 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/recovered.edits/8.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884/recovered.edits/8.seqid 2024-12-02T04:24:58,865 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/626e614376b459e36f1c353808824884 2024-12-02T04:24:58,865 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportWithResetTtl/90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:58,865 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-02T04:24:58,865 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-02T04:24:58,866 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-02T04:24:58,869 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241202480fda87cff446e1963f327dfaa8387c_626e614376b459e36f1c353808824884 2024-12-02T04:24:58,870 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241202ae8b66a6d17a43b68e37a813afae3521_90022892cad5d7e9e79d6624360d1d9b 2024-12-02T04:24:58,871 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-02T04:24:58,873 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,876 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-02T04:24:58,879 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-02T04:24:58,881 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,881 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
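The DisableTableProcedure and DeleteTableProcedure records above (region unassignment, HFileArchiver moves into archive/, hbase:meta and ACL cleanup) are the server-side work behind two client calls. A hedged sketch of those calls via the Admin API; the connection bootstrapping and class name are illustrative, not the test's own utility code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportedTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(table)) {
            admin.disableTable(table);  // drives the DisableTableProcedure chain seen above
          }
          admin.deleteTable(table);     // drives DeleteTableProcedure: archive HFiles, clean hbase:meta, drop ACLs
        }
      }
    }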
2024-12-02T04:24:58,881 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113498881"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:58,881 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113498881"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:58,884 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:24:58,884 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 90022892cad5d7e9e79d6624360d1d9b, NAME => 'testExportWithResetTtl,,1733113477870.90022892cad5d7e9e79d6624360d1d9b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 626e614376b459e36f1c353808824884, NAME => 'testExportWithResetTtl,1,1733113477870.626e614376b459e36f1c353808824884.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:24:58,884 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-02T04:24:58,884 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113498884"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:58,886 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-02T04:24:58,887 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T04:24:58,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,889 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T04:24:58,889 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T04:24:58,889 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T04:24:58,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T04:24:58,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 38 msec 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:58,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-02T04:24:58,898 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:58,898 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:58,898 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-02T04:24:58,898 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-02T04:24:58,899 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:58,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:58,899 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-02T04:24:58,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:58,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T04:24:58,902 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113498902"}]},"ts":"1733113498902"} 2024-12-02T04:24:58,904 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-02T04:24:58,904 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-02T04:24:58,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-02T04:24:58,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, UNASSIGN}] 2024-12-02T04:24:58,908 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, UNASSIGN 2024-12-02T04:24:58,908 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, UNASSIGN 2024-12-02T04:24:58,908 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=6200c5eb2c04b1d4f3b9574ebc3b9096, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:58,908 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=a8a75a2c72a1835b796320d54112331e, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:58,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, UNASSIGN because future has completed 2024-12-02T04:24:58,912 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:58,912 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:24:58,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, UNASSIGN because future has completed 2024-12-02T04:24:58,913 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:24:58,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure a8a75a2c72a1835b796320d54112331e, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:24:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T04:24:59,066 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:59,066 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:59,066 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 6200c5eb2c04b1d4f3b9574ebc3b9096, disabling compactions & flushes 2024-12-02T04:24:59,066 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:59,066 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 2024-12-02T04:24:59,066 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. after waiting 0 ms 2024-12-02T04:24:59,066 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:59,066 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 
2024-12-02T04:24:59,066 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:24:59,067 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing a8a75a2c72a1835b796320d54112331e, disabling compactions & flushes 2024-12-02T04:24:59,067 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:59,067 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:59,067 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. after waiting 0 ms 2024-12-02T04:24:59,067 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:59,071 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:24:59,072 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:24:59,072 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:59,072 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096. 
2024-12-02T04:24:59,072 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 6200c5eb2c04b1d4f3b9574ebc3b9096: Waiting for close lock at 1733113499066Running coprocessor pre-close hooks at 1733113499066Disabling compacts and flushes for region at 1733113499066Disabling writes for close at 1733113499066Writing region close event to WAL at 1733113499068 (+2 ms)Running coprocessor post-close hooks at 1733113499072 (+4 ms)Closed at 1733113499072 2024-12-02T04:24:59,073 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:24:59,073 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e. 2024-12-02T04:24:59,073 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for a8a75a2c72a1835b796320d54112331e: Waiting for close lock at 1733113499066Running coprocessor pre-close hooks at 1733113499066Disabling compacts and flushes for region at 1733113499066Disabling writes for close at 1733113499067 (+1 ms)Writing region close event to WAL at 1733113499067Running coprocessor post-close hooks at 1733113499073 (+6 ms)Closed at 1733113499073 2024-12-02T04:24:59,074 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:59,075 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=6200c5eb2c04b1d4f3b9574ebc3b9096, regionState=CLOSED 2024-12-02T04:24:59,075 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:59,076 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=a8a75a2c72a1835b796320d54112331e, regionState=CLOSED 2024-12-02T04:24:59,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:24:59,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure a8a75a2c72a1835b796320d54112331e, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:24:59,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-02T04:24:59,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 6200c5eb2c04b1d4f3b9574ebc3b9096, server=e2eaa0f11f7e,34291,1733113342258 in 166 msec 2024-12-02T04:24:59,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-02T04:24:59,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 
a8a75a2c72a1835b796320d54112331e, server=e2eaa0f11f7e,41643,1733113342088 in 167 msec 2024-12-02T04:24:59,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6200c5eb2c04b1d4f3b9574ebc3b9096, UNASSIGN in 173 msec 2024-12-02T04:24:59,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-12-02T04:24:59,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8a75a2c72a1835b796320d54112331e, UNASSIGN in 174 msec 2024-12-02T04:24:59,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-02T04:24:59,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 178 msec 2024-12-02T04:24:59,086 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113499086"}]},"ts":"1733113499086"} 2024-12-02T04:24:59,088 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-02T04:24:59,088 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-02T04:24:59,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 190 msec 2024-12-02T04:24:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T04:24:59,218 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T04:24:59,219 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-02T04:24:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:59,221 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-02T04:24:59,223 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:59,225 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-02T04:24:59,227 DEBUG 
[HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:59,227 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:59,230 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/recovered.edits] 2024-12-02T04:24:59,231 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/recovered.edits] 2024-12-02T04:24:59,238 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf/0339cb667c7a438bb55b2513222837a3 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/cf/0339cb667c7a438bb55b2513222837a3 2024-12-02T04:24:59,238 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/cf/99924ff4d6ad4c5e9c03b1a4c098fdb0 2024-12-02T04:24:59,241 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e/recovered.edits/9.seqid 2024-12-02T04:24:59,241 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096/recovered.edits/9.seqid 2024-12-02T04:24:59,242 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:59,242 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithResetTtl/6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:59,242 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-02T04:24:59,242 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-02T04:24:59,243 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-02T04:24:59,250 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202981fe77b2fb34d03887a4189d191e0a5_6200c5eb2c04b1d4f3b9574ebc3b9096 2024-12-02T04:24:59,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,256 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202afd563a636a34642a68012c30ae724c5_a8a75a2c72a1835b796320d54112331e 2024-12-02T04:24:59,257 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-02T04:24:59,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,260 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T04:24:59,260 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T04:24:59,260 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T04:24:59,264 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:59,268 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-02T04:24:59,271 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-02T04:24:59,273 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:59,273 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-02T04:24:59,273 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113499273"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:59,273 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113499273"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:59,276 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:24:59,276 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => a8a75a2c72a1835b796320d54112331e, NAME => 'testtb-testExportWithResetTtl,,1733113476510.a8a75a2c72a1835b796320d54112331e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6200c5eb2c04b1d4f3b9574ebc3b9096, NAME => 'testtb-testExportWithResetTtl,1,1733113476510.6200c5eb2c04b1d4f3b9574ebc3b9096.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:24:59,276 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
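The teardown recorded in the surrounding entries (the DISABLE operation completing, DeleteTableProcedure archiving the region and MOB directories and removing the hbase:meta rows, the ACL znode cleanup, and the three snapshot deletions a few entries further on) corresponds to a short sequence of client-side HBase Admin calls. The following is a minimal sketch of that sequence, not the test's own code; the class and variable names (DropExportTestTable, conn, admin) are illustrative assumptions, while the table and snapshot names are the ones appearing in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // DisableTableProcedure: regions are closed and the table state is
            // flipped to DISABLED in hbase:meta before deletion is allowed.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            // DeleteTableProcedure: region directories are archived, the rows are
            // removed from hbase:meta, and the table's ACL znode is cleaned up.
            admin.deleteTable(table);
            // SnapshotManager: drop the snapshots taken against the table, as in
            // the "delete name: ..." requests logged shortly after this point.
            admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
            admin.deleteSnapshot("snaptb-testExportWithResetTtl");
            admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
        }
    }
}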
2024-12-02T04:24:59,276 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113499276"}]},"ts":"9223372036854775807"} 2024-12-02T04:24:59,279 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-02T04:24:59,280 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T04:24:59,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 61 msec 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T04:24:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-02T04:24:59,285 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T04:24:59,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-02T04:24:59,285 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-02T04:24:59,285 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T04:24:59,298 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-02T04:24:59,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-02T04:24:59,302 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-02T04:24:59,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-02T04:24:59,306 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-02T04:24:59,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-02T04:24:59,339 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=799 (was 797) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-956771194_1 at /127.0.0.1:54642 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:54666 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 129099) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39981 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:44478 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3046 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41199 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:49704 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-956771194_1 at /127.0.0.1:49672 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=821 (was 817) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=711 (was 629) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 22), AvailableMemoryMB=1979 (was 2028) 2024-12-02T04:24:59,339 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-02T04:24:59,358 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=799, OpenFileDescriptor=821, MaxFileDescriptor=1048576, SystemLoadAverage=711, ProcessCount=19, AvailableMemoryMB=1980 2024-12-02T04:24:59,358 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-02T04:24:59,360 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:24:59,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:24:59,363 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:24:59,363 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-02T04:24:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T04:24:59,365 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:24:59,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742037_1213 (size=443) 2024-12-02T04:24:59,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742037_1213 (size=443) 2024-12-02T04:24:59,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742037_1213 (size=443) 2024-12-02T04:24:59,384 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 82326a9d346c15c1e5d33a921b08618a, NAME => 'testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:59,385 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6e4aa4e7a9b1749bcb2f3bbe50e7512f, NAME => 'testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:24:59,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742038_1214 (size=68) 2024-12-02T04:24:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742038_1214 (size=68) 2024-12-02T04:24:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742038_1214 (size=68) 2024-12-02T04:24:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742039_1215 (size=68) 2024-12-02T04:24:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742039_1215 (size=68) 2024-12-02T04:24:59,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742039_1215 (size=68) 2024-12-02T04:24:59,406 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 82326a9d346c15c1e5d33a921b08618a, disabling compactions & flushes 2024-12-02T04:24:59,407 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
after waiting 0 ms 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,407 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 82326a9d346c15c1e5d33a921b08618a: Waiting for close lock at 1733113499407Disabling compacts and flushes for region at 1733113499407Disabling writes for close at 1733113499407Writing region close event to WAL at 1733113499407Closed at 1733113499407 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 6e4aa4e7a9b1749bcb2f3bbe50e7512f, disabling compactions & flushes 2024-12-02T04:24:59,407 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. after waiting 0 ms 2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:24:59,407 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 
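The CreateTableProcedure above is driven by a table descriptor with a single MOB-enabled family ('cf', IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and an initial split at '1', which yields the two regions 82326a9d346c15c1e5d33a921b08618a and 6e4aa4e7a9b1749bcb2f3bbe50e7512f being initialized here. A minimal client-side sketch of building and submitting such a descriptor with the standard HBase 2.x Admin API follows; it is not the test's actual code, and the class, method, and variable names (CreateMobExportTable, createMobTestTable, admin) are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobExportTable {
    // Assumes the caller already holds an open Admin handle.
    static void createMobTestTable(Admin admin) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");

        // MOB_THRESHOLD => '0' in the logged descriptor: every cell in 'cf'
        // is written out as a MOB file rather than inline in the store file.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
            .setMaxVersions(1)     // VERSIONS => '1'
            .build();

        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
            .setColumnFamily(cf)
            .build();

        // Split at '1' so the table starts with the two regions seen in the log:
        // ['', '1') and ['1', '').
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
}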
2024-12-02T04:24:59,407 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6e4aa4e7a9b1749bcb2f3bbe50e7512f: Waiting for close lock at 1733113499407Disabling compacts and flushes for region at 1733113499407Disabling writes for close at 1733113499407Writing region close event to WAL at 1733113499407Closed at 1733113499407 2024-12-02T04:24:59,409 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:24:59,409 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733113499409"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113499409"}]},"ts":"1733113499409"} 2024-12-02T04:24:59,409 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733113499409"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113499409"}]},"ts":"1733113499409"} 2024-12-02T04:24:59,412 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:24:59,413 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:24:59,413 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113499413"}]},"ts":"1733113499413"} 2024-12-02T04:24:59,415 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-02T04:24:59,415 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:24:59,417 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:24:59,417 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:24:59,417 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:24:59,417 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:24:59,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, ASSIGN}] 2024-12-02T04:24:59,419 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, ASSIGN 2024-12-02T04:24:59,419 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, ASSIGN 2024-12-02T04:24:59,420 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:24:59,420 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:24:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T04:24:59,570 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T04:24:59,571 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=6e4aa4e7a9b1749bcb2f3bbe50e7512f, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:59,571 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=82326a9d346c15c1e5d33a921b08618a, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:59,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, ASSIGN because future has completed 2024-12-02T04:24:59,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:24:59,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, ASSIGN because future has completed 2024-12-02T04:24:59,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82326a9d346c15c1e5d33a921b08618a, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:24:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T04:24:59,736 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:24:59,737 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 6e4aa4e7a9b1749bcb2f3bbe50e7512f, NAME => 'testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:24:59,737 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,738 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 82326a9d346c15c1e5d33a921b08618a, NAME => 'testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:24:59,738 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. service=AccessControlService 2024-12-02T04:24:59,738 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
service=AccessControlService 2024-12-02T04:24:59,738 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:59,738 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,739 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,741 INFO [StoreOpener-82326a9d346c15c1e5d33a921b08618a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,741 INFO [StoreOpener-6e4aa4e7a9b1749bcb2f3bbe50e7512f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,743 INFO [StoreOpener-6e4aa4e7a9b1749bcb2f3bbe50e7512f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e4aa4e7a9b1749bcb2f3bbe50e7512f columnFamilyName cf 2024-12-02T04:24:59,743 INFO [StoreOpener-82326a9d346c15c1e5d33a921b08618a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82326a9d346c15c1e5d33a921b08618a columnFamilyName cf 2024-12-02T04:24:59,744 DEBUG [StoreOpener-82326a9d346c15c1e5d33a921b08618a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:59,744 DEBUG [StoreOpener-6e4aa4e7a9b1749bcb2f3bbe50e7512f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:24:59,744 INFO [StoreOpener-82326a9d346c15c1e5d33a921b08618a-1 {}] regionserver.HStore(327): Store=82326a9d346c15c1e5d33a921b08618a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:59,744 INFO [StoreOpener-6e4aa4e7a9b1749bcb2f3bbe50e7512f-1 {}] regionserver.HStore(327): Store=6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:24:59,745 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,745 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,745 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,745 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,745 DEBUG 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,746 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,746 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,746 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,746 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,746 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,747 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,747 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,749 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:59,749 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:24:59,750 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 82326a9d346c15c1e5d33a921b08618a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73572368, jitterRate=0.0963137149810791}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:59,750 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 6e4aa4e7a9b1749bcb2f3bbe50e7512f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61876217, jitterRate=-0.07797251641750336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:24:59,750 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 
{event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:24:59,750 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:24:59,750 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 6e4aa4e7a9b1749bcb2f3bbe50e7512f: Running coprocessor pre-open hook at 1733113499739Writing region info on filesystem at 1733113499739Initializing all the Stores at 1733113499741 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113499741Cleaning up temporary data from old regions at 1733113499746 (+5 ms)Running coprocessor post-open hooks at 1733113499750 (+4 ms)Region opened successfully at 1733113499750 2024-12-02T04:24:59,750 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 82326a9d346c15c1e5d33a921b08618a: Running coprocessor pre-open hook at 1733113499739Writing region info on filesystem at 1733113499739Initializing all the Stores at 1733113499741 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113499741Cleaning up temporary data from old regions at 1733113499746 (+5 ms)Running coprocessor post-open hooks at 1733113499750 (+4 ms)Region opened successfully at 1733113499750 2024-12-02T04:24:59,751 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f., pid=99, masterSystemTime=1733113499728 2024-12-02T04:24:59,751 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a., pid=100, masterSystemTime=1733113499729 2024-12-02T04:24:59,752 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:24:59,753 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 
2024-12-02T04:24:59,753 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=6e4aa4e7a9b1749bcb2f3bbe50e7512f, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:24:59,753 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,753 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,754 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=82326a9d346c15c1e5d33a921b08618a, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:24:59,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:24:59,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82326a9d346c15c1e5d33a921b08618a, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:24:59,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-02T04:24:59,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f, server=e2eaa0f11f7e,41643,1733113342088 in 182 msec 2024-12-02T04:24:59,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-02T04:24:59,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 82326a9d346c15c1e5d33a921b08618a, server=e2eaa0f11f7e,34291,1733113342258 in 181 msec 2024-12-02T04:24:59,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, ASSIGN in 341 msec 2024-12-02T04:24:59,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-02T04:24:59,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, ASSIGN in 343 msec 2024-12-02T04:24:59,762 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:24:59,762 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113499762"}]},"ts":"1733113499762"} 2024-12-02T04:24:59,764 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-02T04:24:59,765 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:24:59,765 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-02T04:24:59,768 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T04:24:59,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:24:59,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:59,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:59,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:59,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:24:59,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 462 msec 2024-12-02T04:24:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T04:24:59,988 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T04:24:59,988 DEBUG [Time-limited 
test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:24:59,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-02T04:24:59,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:24:59,991 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:24:59,992 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:24:59,998 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:25:00,003 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:25:00,006 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:25:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113500006 (current time:1733113500006). 
2024-12-02T04:25:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:25:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-02T04:25:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:25:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6873f33a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:00,007 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:00,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:00,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:00,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cf35765, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:00,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:00,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,009 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:00,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70f105ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:00,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:00,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:00,012 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:00,013 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:25:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,013 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:25:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b3931e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:00,015 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:00,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:00,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:00,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c8c105a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:00,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:00,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,016 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60602, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:00,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@626d9d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:00,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:00,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:00,019 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T04:25:00,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:25:00,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:00,022 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:00,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 2024-12-02T04:25:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:00,023 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,023 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:25:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T04:25:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:25:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:25:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-02T04:25:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T04:25:00,026 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:25:00,027 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:25:00,030 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:25:00,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742040_1216 (size=170) 2024-12-02T04:25:00,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742040_1216 (size=170) 2024-12-02T04:25:00,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742040_1216 (size=170) 2024-12-02T04:25:00,036 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-02T04:25:00,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f}] 2024-12-02T04:25:00,037 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:00,037 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T04:25:00,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-02T04:25:00,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 6e4aa4e7a9b1749bcb2f3bbe50e7512f: 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 82326a9d346c15c1e5d33a921b08618a: 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. for emptySnaptb0-testExportFileSystemState completed. 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. for emptySnaptb0-testExportFileSystemState completed. 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:00,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:00,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:25:00,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:25:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742041_1217 (size=71) 2024-12-02T04:25:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742042_1218 (size=71) 2024-12-02T04:25:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742041_1217 (size=71) 2024-12-02T04:25:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742042_1218 (size=71) 2024-12-02T04:25:00,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:25:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742041_1217 (size=71) 2024-12-02T04:25:00,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-02T04:25:00,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742042_1218 (size=71) 2024-12-02T04:25:00,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
2024-12-02T04:25:00,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-02T04:25:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-02T04:25:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-02T04:25:00,200 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:00,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,200 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:00,200 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f in 165 msec 2024-12-02T04:25:00,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-12-02T04:25:00,203 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:25:00,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a in 165 msec 2024-12-02T04:25:00,204 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:25:00,205 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:25:00,205 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:25:00,205 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:00,205 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:25:00,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742043_1219 (size=63) 2024-12-02T04:25:00,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742043_1219 (size=63) 2024-12-02T04:25:00,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742043_1219 (size=63) 2024-12-02T04:25:00,212 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:25:00,212 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-02T04:25:00,213 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-02T04:25:00,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742044_1220 (size=653) 2024-12-02T04:25:00,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742044_1220 (size=653) 2024-12-02T04:25:00,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742044_1220 (size=653) 2024-12-02T04:25:00,222 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:25:00,226 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:25:00,227 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-02T04:25:00,228 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:25:00,228 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-02T04:25:00,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 204 msec 2024-12-02T04:25:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T04:25:00,348 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T04:25:00,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:25:00,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:25:00,357 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:25:00,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-02T04:25:00,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
2024-12-02T04:25:00,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:25:00,362 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:25:00,368 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:25:00,377 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:25:00,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:25:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113500380 (current time:1733113500380). 2024-12-02T04:25:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:25:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-02T04:25:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:25:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d868963, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:00,382 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:00,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:00,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:00,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f38994, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-02T04:25:00,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:00,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:00,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,384 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60608, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:00,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@157af22f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:00,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:00,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:00,389 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:00,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:25:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,391 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:25:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@270e3db1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:00,393 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:00,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:00,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:00,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78553cb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:00,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:00,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,395 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:00,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33961f10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:00,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:00,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:00,399 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44588, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:00,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:25:00,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:00,403 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40686, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:00,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:25:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:00,404 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:25:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T04:25:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
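The entries above show MasterRpcServices.snapshot being invoked for testtb-testExportFileSystemState, the table ACL being read, and SnapshotManager deciding there is no existing snapshot to reuse. For orientation only, here is a minimal client-side sketch of how such a FLUSH-type snapshot can be requested through the public Admin API; the class name SnapshotRequestSketch and the hard-coded names are illustrative assumptions, not code taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Illustrative sketch: request a flush-type snapshot like the one logged
    // above ({ ss=snaptb0-testExportFileSystemState ... type=FLUSH }).
    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The master turns this call into a SnapshotProcedure whose states
          // (SNAPSHOT_PREPARE, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, ...) appear
          // in the PEWorker log lines that follow.
          admin.snapshot("snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }
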
2024-12-02T04:25:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:25:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-02T04:25:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T04:25:00,407 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:25:00,409 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:25:00,411 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:25:00,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742045_1221 (size=165) 2024-12-02T04:25:00,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742045_1221 (size=165) 2024-12-02T04:25:00,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742045_1221 (size=165) 2024-12-02T04:25:00,421 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:25:00,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f}] 2024-12-02T04:25:00,422 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,422 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:00,517 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T04:25:00,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-02T04:25:00,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-02T04:25:00,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:25:00,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:25:00,574 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 82326a9d346c15c1e5d33a921b08618a 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-02T04:25:00,574 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 6e4aa4e7a9b1749bcb2f3bbe50e7512f 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-02T04:25:00,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a is 71, key is 09653df1fe01e58d069aa16643df4606/cf:q/1733113500354/Put/seqid=0 2024-12-02T04:25:00,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f is 71, key is 14a002db0fd64d2176ea8a90cab16ce1/cf:q/1733113500355/Put/seqid=0 2024-12-02T04:25:00,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742046_1222 (size=5032) 2024-12-02T04:25:00,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742046_1222 (size=5032) 2024-12-02T04:25:00,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742046_1222 (size=5032) 2024-12-02T04:25:00,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:00,610 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/.tmp/cf/26ecc4754d354bd58e257d36c4a8b34a, store: [table=testtb-testExportFileSystemState family=cf region=82326a9d346c15c1e5d33a921b08618a] 2024-12-02T04:25:00,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/.tmp/cf/26ecc4754d354bd58e257d36c4a8b34a is 209, key is 00b91d1ddf9dc1e5c19ae9d22759bcddd/cf:q/1733113500354/Put/seqid=0 2024-12-02T04:25:00,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742047_1223 (size=8241) 2024-12-02T04:25:00,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742047_1223 (size=8241) 2024-12-02T04:25:00,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742047_1223 (size=8241) 2024-12-02T04:25:00,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742048_1224 (size=5709) 2024-12-02T04:25:00,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742048_1224 (size=5709) 2024-12-02T04:25:00,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742048_1224 (size=5709) 2024-12-02T04:25:00,631 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/.tmp/cf/26ecc4754d354bd58e257d36c4a8b34a 2024-12-02T04:25:00,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/.tmp/cf/26ecc4754d354bd58e257d36c4a8b34a as 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf/26ecc4754d354bd58e257d36c4a8b34a 2024-12-02T04:25:00,644 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf/26ecc4754d354bd58e257d36c4a8b34a, entries=2, sequenceid=6, filesize=5.6 K 2024-12-02T04:25:00,645 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 82326a9d346c15c1e5d33a921b08618a in 71ms, sequenceid=6, compaction requested=false 2024-12-02T04:25:00,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-02T04:25:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 82326a9d346c15c1e5d33a921b08618a: 2024-12-02T04:25:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. for snaptb0-testExportFileSystemState completed. 2024-12-02T04:25:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-02T04:25:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf/26ecc4754d354bd58e257d36c4a8b34a] hfiles 2024-12-02T04:25:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf/26ecc4754d354bd58e257d36c4a8b34a for snapshot=snaptb0-testExportFileSystemState 2024-12-02T04:25:00,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742049_1225 (size=110) 2024-12-02T04:25:00,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742049_1225 (size=110) 2024-12-02T04:25:00,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742049_1225 (size=110) 2024-12-02T04:25:00,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
2024-12-02T04:25:00,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-02T04:25:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-02T04:25:00,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,658 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:00,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 82326a9d346c15c1e5d33a921b08618a in 237 msec 2024-12-02T04:25:00,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T04:25:01,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:01,032 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:01,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/.tmp/cf/07b83b43273048708156b7b315d0b15b, store: [table=testtb-testExportFileSystemState family=cf region=6e4aa4e7a9b1749bcb2f3bbe50e7512f] 2024-12-02T04:25:01,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/.tmp/cf/07b83b43273048708156b7b315d0b15b is 209, key is 18378a6309c5a5ed839c374dad1a48928/cf:q/1733113500355/Put/seqid=0 2024-12-02T04:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T04:25:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742050_1226 (size=15202) 2024-12-02T04:25:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 
is added to blk_1073742050_1226 (size=15202) 2024-12-02T04:25:01,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742050_1226 (size=15202) 2024-12-02T04:25:01,039 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/.tmp/cf/07b83b43273048708156b7b315d0b15b 2024-12-02T04:25:01,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/.tmp/cf/07b83b43273048708156b7b315d0b15b as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf/07b83b43273048708156b7b315d0b15b 2024-12-02T04:25:01,050 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf/07b83b43273048708156b7b315d0b15b, entries=48, sequenceid=6, filesize=14.8 K 2024-12-02T04:25:01,051 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6e4aa4e7a9b1749bcb2f3bbe50e7512f in 477ms, sequenceid=6, compaction requested=false 2024-12-02T04:25:01,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 6e4aa4e7a9b1749bcb2f3bbe50e7512f: 2024-12-02T04:25:01,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. for snaptb0-testExportFileSystemState completed. 2024-12-02T04:25:01,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-02T04:25:01,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:01,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf/07b83b43273048708156b7b315d0b15b] hfiles 2024-12-02T04:25:01,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf/07b83b43273048708156b7b315d0b15b for snapshot=snaptb0-testExportFileSystemState 2024-12-02T04:25:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742051_1227 (size=110) 2024-12-02T04:25:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742051_1227 (size=110) 2024-12-02T04:25:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742051_1227 (size=110) 2024-12-02T04:25:01,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 
2024-12-02T04:25:01,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-02T04:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-02T04:25:01,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:01,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:01,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-02T04:25:01,062 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:25:01,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f in 639 msec 2024-12-02T04:25:01,062 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:25:01,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:25:01,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:25:01,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:01,064 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a] hfiles 2024-12-02T04:25:01,065 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:01,065 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742052_1228 (size=294) 2024-12-02T04:25:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742052_1228 (size=294) 2024-12-02T04:25:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742052_1228 (size=294) 2024-12-02T04:25:01,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:25:01,072 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-02T04:25:01,073 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T04:25:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742053_1229 (size=963) 2024-12-02T04:25:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742053_1229 (size=963) 2024-12-02T04:25:01,081 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742053_1229 (size=963) 2024-12-02T04:25:01,083 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:25:01,089 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:25:01,089 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T04:25:01,091 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:25:01,091 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-02T04:25:01,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 686 msec 2024-12-02T04:25:01,264 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000005/launch_container.sh] 2024-12-02T04:25:01,264 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000005/container_tokens] 2024-12-02T04:25:01,264 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000005/sysfs] 2024-12-02T04:25:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=104 2024-12-02T04:25:01,549 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T04:25:01,549 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549 2024-12-02T04:25:01,549 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:01,576 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:01,576 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T04:25:01,578 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
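The ExportSnapshot lines above record the tool being configured with an inputRoot, an outputRoot under export-test/export-1733113501549, and skipTmp=false before it verifies the source snapshot. Purely as a rough sketch (not the code TestExportSnapshot actually runs), the same export can be driven programmatically via ToolRunner using the standard -snapshot/-copy-to options; the destination path below is a placeholder standing in for the export-test directory in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Illustrative sketch: export the snapshot taken above to another HDFS
    // location, equivalent to invoking the ExportSnapshot tool from the shell.
    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            // Placeholder destination; the test writes under export-test/...
            "-copy-to", "hdfs://localhost:34539/user/jenkins/export-dest"
        });
        System.exit(rc);
      }
    }
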
2024-12-02T04:25:01,584 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T04:25:01,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742054_1230 (size=165) 2024-12-02T04:25:01,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742054_1230 (size=165) 2024-12-02T04:25:01,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742054_1230 (size=165) 2024-12-02T04:25:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742055_1231 (size=963) 2024-12-02T04:25:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742055_1231 (size=963) 2024-12-02T04:25:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742055_1231 (size=963) 2024-12-02T04:25:01,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:01,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:01,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-02T04:25:01,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-02T04:25:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-02T04:25:01,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-02T04:25:02,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-15719110024204676688.jar 2024-12-02T04:25:02,510 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,515 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0003_000001 (auth:SIMPLE) from 127.0.0.1:50486 2024-12-02T04:25:02,524 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000001/launch_container.sh] 2024-12-02T04:25:02,524 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000001/container_tokens] 2024-12-02T04:25:02,524 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0003/container_1733113348655_0003_01_000001/sysfs] 2024-12-02T04:25:02,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-5028639148147857425.jar 2024-12-02T04:25:02,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:02,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:25:02,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:25:02,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:25:02,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:25:02,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:25:02,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:25:02,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:25:02,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:25:02,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:25:02,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:25:02,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:25:02,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:02,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:02,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:02,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:02,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:02,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:02,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742056_1232 (size=131440) 2024-12-02T04:25:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742056_1232 (size=131440) 2024-12-02T04:25:02,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742056_1232 (size=131440) 2024-12-02T04:25:02,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742057_1233 (size=4188619) 2024-12-02T04:25:02,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742057_1233 (size=4188619) 2024-12-02T04:25:02,682 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742057_1233 (size=4188619) 2024-12-02T04:25:02,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742058_1234 (size=1323991) 2024-12-02T04:25:02,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742058_1234 (size=1323991) 2024-12-02T04:25:02,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742058_1234 (size=1323991) 2024-12-02T04:25:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742059_1235 (size=903927) 2024-12-02T04:25:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742059_1235 (size=903927) 2024-12-02T04:25:02,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742059_1235 (size=903927) 2024-12-02T04:25:02,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742060_1236 (size=8360083) 2024-12-02T04:25:02,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742060_1236 (size=8360083) 2024-12-02T04:25:02,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742060_1236 (size=8360083) 2024-12-02T04:25:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742061_1237 (size=1877034) 2024-12-02T04:25:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742061_1237 (size=1877034) 2024-12-02T04:25:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742061_1237 (size=1877034) 2024-12-02T04:25:02,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742062_1238 (size=77835) 2024-12-02T04:25:02,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742062_1238 (size=77835) 2024-12-02T04:25:02,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742062_1238 (size=77835) 2024-12-02T04:25:02,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742063_1239 (size=30949) 2024-12-02T04:25:02,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742063_1239 (size=30949) 2024-12-02T04:25:02,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742063_1239 (size=30949) 2024-12-02T04:25:02,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742064_1240 (size=1597213) 2024-12-02T04:25:02,768 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742064_1240 (size=1597213) 2024-12-02T04:25:02,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742064_1240 (size=1597213) 2024-12-02T04:25:02,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742065_1241 (size=4695811) 2024-12-02T04:25:02,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742065_1241 (size=4695811) 2024-12-02T04:25:02,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742065_1241 (size=4695811) 2024-12-02T04:25:02,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742066_1242 (size=232957) 2024-12-02T04:25:02,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742066_1242 (size=232957) 2024-12-02T04:25:02,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742066_1242 (size=232957) 2024-12-02T04:25:02,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742067_1243 (size=127628) 2024-12-02T04:25:02,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742067_1243 (size=127628) 2024-12-02T04:25:02,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742067_1243 (size=127628) 2024-12-02T04:25:02,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742068_1244 (size=20406) 2024-12-02T04:25:02,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742068_1244 (size=20406) 2024-12-02T04:25:02,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742068_1244 (size=20406) 2024-12-02T04:25:02,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742069_1245 (size=5175431) 2024-12-02T04:25:02,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742069_1245 (size=5175431) 2024-12-02T04:25:02,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742069_1245 (size=5175431) 2024-12-02T04:25:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742070_1246 (size=6424732) 2024-12-02T04:25:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742070_1246 (size=6424732) 2024-12-02T04:25:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742070_1246 (size=6424732) 2024-12-02T04:25:02,868 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742071_1247 (size=217634) 2024-12-02T04:25:02,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742071_1247 (size=217634) 2024-12-02T04:25:02,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742071_1247 (size=217634) 2024-12-02T04:25:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742072_1248 (size=1832290) 2024-12-02T04:25:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742072_1248 (size=1832290) 2024-12-02T04:25:02,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742072_1248 (size=1832290) 2024-12-02T04:25:02,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742073_1249 (size=322274) 2024-12-02T04:25:02,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742073_1249 (size=322274) 2024-12-02T04:25:02,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742073_1249 (size=322274) 2024-12-02T04:25:02,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742074_1250 (size=503880) 2024-12-02T04:25:02,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742074_1250 (size=503880) 2024-12-02T04:25:02,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742074_1250 (size=503880) 2024-12-02T04:25:02,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742075_1251 (size=440957) 2024-12-02T04:25:02,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742075_1251 (size=440957) 2024-12-02T04:25:02,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742075_1251 (size=440957) 2024-12-02T04:25:02,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742076_1252 (size=29229) 2024-12-02T04:25:02,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742076_1252 (size=29229) 2024-12-02T04:25:02,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742076_1252 (size=29229) 2024-12-02T04:25:02,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742077_1253 (size=24096) 2024-12-02T04:25:02,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742077_1253 (size=24096) 2024-12-02T04:25:02,926 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742077_1253 (size=24096) 2024-12-02T04:25:02,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742078_1254 (size=111872) 2024-12-02T04:25:02,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742078_1254 (size=111872) 2024-12-02T04:25:02,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742078_1254 (size=111872) 2024-12-02T04:25:02,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742079_1255 (size=45609) 2024-12-02T04:25:02,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742079_1255 (size=45609) 2024-12-02T04:25:02,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742079_1255 (size=45609) 2024-12-02T04:25:02,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742080_1256 (size=136454) 2024-12-02T04:25:02,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742080_1256 (size=136454) 2024-12-02T04:25:02,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742080_1256 (size=136454) 2024-12-02T04:25:02,959 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
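Editor's note: the JobResourceUploader warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") is typical of MiniMRCluster-based tests, where the MapReduce job is submitted straight from test classes rather than from a packaged jar. In a standalone driver it is normally avoided by telling the Job which jar carries the user classes. A minimal sketch, assuming a hypothetical driver class MyDriver with input and output paths passed as arguments:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class MyDriver {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "my-job");
        // Either call avoids the "No job jar file set" warning:
        job.setJarByClass(MyDriver.class);      // infer the jar from a class it contains
        // job.setJar("/path/to/my-job.jar");   // or point at the jar explicitly
        // Mapper/reducer setup omitted; the identity mapper runs by default.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }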
2024-12-02T04:25:02,960 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-02T04:25:02,962 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.8 K 2024-12-02T04:25:02,963 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T04:25:02,963 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-02T04:25:02,963 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-02T04:25:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742081_1257 (size=1035) 2024-12-02T04:25:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742081_1257 (size=1035) 2024-12-02T04:25:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742081_1257 (size=1035) 2024-12-02T04:25:02,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742082_1258 (size=35) 2024-12-02T04:25:02,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742082_1258 (size=35) 2024-12-02T04:25:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742082_1258 (size=35) 2024-12-02T04:25:03,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742083_1259 (size=304004) 2024-12-02T04:25:03,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742083_1259 (size=304004) 2024-12-02T04:25:03,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742083_1259 (size=304004) 2024-12-02T04:25:03,019 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:25:03,020 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:25:03,258 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:56556 2024-12-02T04:25:04,236 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:25:08,473 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:55266 2024-12-02T04:25:08,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742084_1260 (size=349702) 2024-12-02T04:25:08,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742084_1260 (size=349702) 2024-12-02T04:25:08,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742084_1260 (size=349702) 2024-12-02T04:25:10,754 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:56566 2024-12-02T04:25:10,759 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:50498 2024-12-02T04:25:10,762 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:50502 2024-12-02T04:25:11,583 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:56578 2024-12-02T04:25:14,520 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:25:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742085_1261 (size=8241) 2024-12-02T04:25:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742085_1261 (size=8241) 2024-12-02T04:25:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742085_1261 (size=8241) 2024-12-02T04:25:17,008 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000003/launch_container.sh] 2024-12-02T04:25:17,008 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000003/container_tokens] 2024-12-02T04:25:17,008 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000003/sysfs] 2024-12-02T04:25:18,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742087_1263 (size=5032) 2024-12-02T04:25:18,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742087_1263 (size=5032) 2024-12-02T04:25:18,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742087_1263 (size=5032) 2024-12-02T04:25:18,801 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000005/launch_container.sh] 2024-12-02T04:25:18,802 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000005/container_tokens] 2024-12-02T04:25:18,802 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000005/sysfs] 2024-12-02T04:25:19,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742088_1264 (size=5709) 2024-12-02T04:25:19,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742088_1264 (size=5709) 2024-12-02T04:25:19,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742088_1264 (size=5709) 2024-12-02T04:25:19,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742089_1265 (size=15202) 2024-12-02T04:25:19,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742089_1265 (size=15202) 2024-12-02T04:25:19,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742089_1265 (size=15202) 2024-12-02T04:25:19,503 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000004/launch_container.sh] 2024-12-02T04:25:19,503 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000004/container_tokens] 2024-12-02T04:25:19,503 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000004/sysfs] 2024-12-02T04:25:19,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742086_1262 (size=31756) 2024-12-02T04:25:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742086_1262 (size=31756) 2024-12-02T04:25:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742086_1262 (size=31756) 2024-12-02T04:25:19,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742090_1266 (size=466) 2024-12-02T04:25:19,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742090_1266 (size=466) 2024-12-02T04:25:19,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742090_1266 (size=466) 2024-12-02T04:25:19,917 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000002/launch_container.sh] 2024-12-02T04:25:19,917 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000002/container_tokens] 2024-12-02T04:25:19,918 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000002/sysfs] 2024-12-02T04:25:19,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38471 is added to blk_1073742091_1267 (size=31756) 2024-12-02T04:25:19,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742091_1267 (size=31756) 2024-12-02T04:25:19,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742091_1267 (size=31756) 2024-12-02T04:25:19,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742092_1268 (size=349702) 2024-12-02T04:25:19,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742092_1268 (size=349702) 2024-12-02T04:25:19,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742092_1268 (size=349702) 2024-12-02T04:25:20,012 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:39894 2024-12-02T04:25:20,563 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:25:21,217 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:25:21,217 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T04:25:21,254 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-02T04:25:21,254 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:25:21,255 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:25:21,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T04:25:21,256 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-02T04:25:21,256 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-02T04:25:21,256 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T04:25:21,257 DEBUG [Time-limited test {}] 
snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-02T04:25:21,257 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113501549/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-02T04:25:21,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-02T04:25:21,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,297 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113521297"}]},"ts":"1733113521297"} 2024-12-02T04:25:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T04:25:21,301 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-02T04:25:21,302 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-02T04:25:21,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-02T04:25:21,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, UNASSIGN}] 2024-12-02T04:25:21,309 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, UNASSIGN 2024-12-02T04:25:21,310 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, UNASSIGN 2024-12-02T04:25:21,311 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=6e4aa4e7a9b1749bcb2f3bbe50e7512f, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:25:21,313 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=e2eaa0f11f7e,34291,1733113342258, table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a. 
It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T04:25:21,311 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=82326a9d346c15c1e5d33a921b08618a, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:25:21,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, UNASSIGN because future has completed 2024-12-02T04:25:21,318 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:25:21,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82326a9d346c15c1e5d33a921b08618a, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:25:21,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, UNASSIGN because future has completed 2024-12-02T04:25:21,321 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:25:21,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:25:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T04:25:21,474 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:21,474 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:25:21,474 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 82326a9d346c15c1e5d33a921b08618a, disabling compactions & flushes 2024-12-02T04:25:21,475 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:25:21,475 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:25:21,475 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
after waiting 0 ms 2024-12-02T04:25:21,475 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 2024-12-02T04:25:21,476 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:21,477 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:25:21,477 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 6e4aa4e7a9b1749bcb2f3bbe50e7512f, disabling compactions & flushes 2024-12-02T04:25:21,477 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:25:21,477 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:25:21,477 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. after waiting 0 ms 2024-12-02T04:25:21,477 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 2024-12-02T04:25:21,491 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:25:21,492 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:25:21,492 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a. 
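Editor's note: the snapshot export whose completion is logged above ("Export Completed: snaptb0-testExportFileSystemState") is normally driven through the ExportSnapshot tool, which submits the MapReduce copy job and produces the block writes seen in this log. A minimal sketch using ToolRunner; the destination URI is a placeholder, and the -snapshot / -copy-to flag spelling follows the HBase reference guide and may differ slightly between versions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest plus the HFiles it references to another filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://backup-nn:8020/hbase"   // placeholder destination, not from this log
        });
        System.exit(rc);
      }
    }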
2024-12-02T04:25:21,492 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 82326a9d346c15c1e5d33a921b08618a: Waiting for close lock at 1733113521474Running coprocessor pre-close hooks at 1733113521474Disabling compacts and flushes for region at 1733113521474Disabling writes for close at 1733113521475 (+1 ms)Writing region close event to WAL at 1733113521481 (+6 ms)Running coprocessor post-close hooks at 1733113521492 (+11 ms)Closed at 1733113521492 2024-12-02T04:25:21,494 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:21,495 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=82326a9d346c15c1e5d33a921b08618a, regionState=CLOSED 2024-12-02T04:25:21,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82326a9d346c15c1e5d33a921b08618a, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:25:21,504 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:25:21,505 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:25:21,505 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f. 
2024-12-02T04:25:21,505 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 6e4aa4e7a9b1749bcb2f3bbe50e7512f: Waiting for close lock at 1733113521477Running coprocessor pre-close hooks at 1733113521477Disabling compacts and flushes for region at 1733113521477Disabling writes for close at 1733113521477Writing region close event to WAL at 1733113521486 (+9 ms)Running coprocessor post-close hooks at 1733113521505 (+19 ms)Closed at 1733113521505 2024-12-02T04:25:21,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-12-02T04:25:21,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 82326a9d346c15c1e5d33a921b08618a, server=e2eaa0f11f7e,34291,1733113342258 in 181 msec 2024-12-02T04:25:21,507 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:21,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=82326a9d346c15c1e5d33a921b08618a, UNASSIGN in 199 msec 2024-12-02T04:25:21,507 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=6e4aa4e7a9b1749bcb2f3bbe50e7512f, regionState=CLOSED 2024-12-02T04:25:21,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:25:21,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-12-02T04:25:21,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 6e4aa4e7a9b1749bcb2f3bbe50e7512f, server=e2eaa0f11f7e,41643,1733113342088 in 195 msec 2024-12-02T04:25:21,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-02T04:25:21,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e4aa4e7a9b1749bcb2f3bbe50e7512f, UNASSIGN in 213 msec 2024-12-02T04:25:21,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-02T04:25:21,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 220 msec 2024-12-02T04:25:21,530 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113521530"}]},"ts":"1733113521530"} 2024-12-02T04:25:21,532 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-02T04:25:21,533 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to 
state=DISABLED 2024-12-02T04:25:21,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 246 msec 2024-12-02T04:25:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T04:25:21,619 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T04:25:21,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-02T04:25:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,622 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-02T04:25:21,624 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,632 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-02T04:25:21,639 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:21,639 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:21,641 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/recovered.edits] 2024-12-02T04:25:21,641 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/recovered.edits] 2024-12-02T04:25:21,648 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf/26ecc4754d354bd58e257d36c4a8b34a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/cf/26ecc4754d354bd58e257d36c4a8b34a 2024-12-02T04:25:21,650 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf/07b83b43273048708156b7b315d0b15b to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/cf/07b83b43273048708156b7b315d0b15b 2024-12-02T04:25:21,655 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f/recovered.edits/9.seqid 2024-12-02T04:25:21,655 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a/recovered.edits/9.seqid 2024-12-02T04:25:21,656 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:21,656 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemState/82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:21,657 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-02T04:25:21,657 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-02T04:25:21,659 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-02T04:25:21,665 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f to 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120290f29931dbfe437b98f316ad0c5de38e_6e4aa4e7a9b1749bcb2f3bbe50e7512f 2024-12-02T04:25:21,668 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241202c58aa9978c474062923268b9a029c01e_82326a9d346c15c1e5d33a921b08618a 2024-12-02T04:25:21,669 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-02T04:25:21,672 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-02T04:25:21,677 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-02T04:25:21,680 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-02T04:25:21,682 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,682 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
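Editor's note: the disable, delete, and snapshot-cleanup activity in the surrounding entries (DisableTableProcedure pid=107, DeleteTableProcedure pid=113, and the "delete name: ..." snapshot requests further below) corresponds to a handful of client-side Admin calls. A minimal sketch of that teardown, assuming an hbase-site.xml on the classpath supplies the connection settings:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTeardownExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A table must be disabled (regions closed) before it can be deleted;
          // server side this appears as DisableTableProcedure then DeleteTableProcedure.
          admin.disableTable(table);
          admin.deleteTable(table);
          // Drop the snapshots taken against the table.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }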
2024-12-02T04:25:21,682 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113521682"}]},"ts":"9223372036854775807"} 2024-12-02T04:25:21,682 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113521682"}]},"ts":"9223372036854775807"} 2024-12-02T04:25:21,685 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:25:21,685 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 82326a9d346c15c1e5d33a921b08618a, NAME => 'testtb-testExportFileSystemState,,1733113499359.82326a9d346c15c1e5d33a921b08618a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6e4aa4e7a9b1749bcb2f3bbe50e7512f, NAME => 'testtb-testExportFileSystemState,1,1733113499359.6e4aa4e7a9b1749bcb2f3bbe50e7512f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:25:21,685 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-02T04:25:21,686 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113521685"}]},"ts":"9223372036854775807"} 2024-12-02T04:25:21,688 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-02T04:25:21,689 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T04:25:21,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 69 msec 2024-12-02T04:25:21,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T04:25:21,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T04:25:21,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T04:25:21,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:21,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:21,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-02T04:25:21,711 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-02T04:25:21,711 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T04:25:21,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-02T04:25:21,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-02T04:25:21,723 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-02T04:25:21,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-02T04:25:21,758 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=802 (was 799) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:45574 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:51806 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1795037872_1 at /127.0.0.1:51788 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 133033) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33345 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:41199 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3893 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:33345 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:47382 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1795037872_1 at /127.0.0.1:47370 [Waiting for operation 
#2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:41199 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=815 (was 821), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=824 (was 711) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=2831 (was 1980) - AvailableMemoryMB LEAK? - 2024-12-02T04:25:21,758 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-02T04:25:21,792 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=802, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=824, ProcessCount=23, AvailableMemoryMB=2829 2024-12-02T04:25:21,792 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-02T04:25:21,794 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:25:21,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:25:21,797 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:25:21,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request 
for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-02T04:25:21,798 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:25:21,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T04:25:21,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742093_1269 (size=440) 2024-12-02T04:25:21,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742093_1269 (size=440) 2024-12-02T04:25:21,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742093_1269 (size=440) 2024-12-02T04:25:21,830 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 46028d43f5fe50f062bcfc6ac75c12d4, NAME => 'testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:21,831 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f6f53daf25c27da05945d00784d9b96e, NAME => 'testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:21,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742094_1270 (size=65) 2024-12-02T04:25:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742094_1270 (size=65) 2024-12-02T04:25:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742094_1270 (size=65) 2024-12-02T04:25:21,864 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated 
testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:25:21,864 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 46028d43f5fe50f062bcfc6ac75c12d4, disabling compactions & flushes 2024-12-02T04:25:21,864 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:21,864 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:21,865 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. after waiting 0 ms 2024-12-02T04:25:21,865 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:21,865 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:21,865 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 46028d43f5fe50f062bcfc6ac75c12d4: Waiting for close lock at 1733113521864Disabling compacts and flushes for region at 1733113521864Disabling writes for close at 1733113521865 (+1 ms)Writing region close event to WAL at 1733113521865Closed at 1733113521865 2024-12-02T04:25:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742095_1271 (size=65) 2024-12-02T04:25:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742095_1271 (size=65) 2024-12-02T04:25:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742095_1271 (size=65) 2024-12-02T04:25:21,874 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:25:21,874 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing f6f53daf25c27da05945d00784d9b96e, disabling compactions & flushes 2024-12-02T04:25:21,874 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:25:21,874 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
2024-12-02T04:25:21,874 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. after waiting 0 ms 2024-12-02T04:25:21,874 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:25:21,874 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:25:21,874 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for f6f53daf25c27da05945d00784d9b96e: Waiting for close lock at 1733113521874Disabling compacts and flushes for region at 1733113521874Disabling writes for close at 1733113521874Writing region close event to WAL at 1733113521874Closed at 1733113521874 2024-12-02T04:25:21,875 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:25:21,876 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733113521875"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113521875"}]},"ts":"1733113521875"} 2024-12-02T04:25:21,876 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733113521875"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113521875"}]},"ts":"1733113521875"} 2024-12-02T04:25:21,879 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-02T04:25:21,880 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:25:21,880 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113521880"}]},"ts":"1733113521880"} 2024-12-02T04:25:21,883 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-02T04:25:21,883 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:25:21,884 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:25:21,885 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:25:21,885 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:25:21,885 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:25:21,885 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:25:21,885 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:25:21,885 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:25:21,885 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:25:21,885 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:25:21,885 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:25:21,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, ASSIGN}] 2024-12-02T04:25:21,886 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, ASSIGN 2024-12-02T04:25:21,886 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, ASSIGN 2024-12-02T04:25:21,888 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:25:21,888 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:25:21,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T04:25:22,038 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T04:25:22,039 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=46028d43f5fe50f062bcfc6ac75c12d4, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:25:22,039 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=f6f53daf25c27da05945d00784d9b96e, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:25:22,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, ASSIGN because future has completed 2024-12-02T04:25:22,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:25:22,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, ASSIGN because future has completed 2024-12-02T04:25:22,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure f6f53daf25c27da05945d00784d9b96e, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:25:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T04:25:22,197 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:22,197 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 46028d43f5fe50f062bcfc6ac75c12d4, NAME => 'testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:25:22,197 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
2024-12-02T04:25:22,197 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => f6f53daf25c27da05945d00784d9b96e, NAME => 'testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:25:22,197 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. service=AccessControlService 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. service=AccessControlService 2024-12-02T04:25:22,198 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:25:22,198 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,198 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,200 INFO 
[StoreOpener-46028d43f5fe50f062bcfc6ac75c12d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,200 INFO [StoreOpener-f6f53daf25c27da05945d00784d9b96e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,201 INFO [StoreOpener-46028d43f5fe50f062bcfc6ac75c12d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46028d43f5fe50f062bcfc6ac75c12d4 columnFamilyName cf 2024-12-02T04:25:22,201 INFO [StoreOpener-f6f53daf25c27da05945d00784d9b96e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f6f53daf25c27da05945d00784d9b96e columnFamilyName cf 2024-12-02T04:25:22,202 DEBUG [StoreOpener-46028d43f5fe50f062bcfc6ac75c12d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:22,202 DEBUG [StoreOpener-f6f53daf25c27da05945d00784d9b96e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:22,203 INFO [StoreOpener-f6f53daf25c27da05945d00784d9b96e-1 {}] regionserver.HStore(327): Store=f6f53daf25c27da05945d00784d9b96e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:25:22,203 INFO [StoreOpener-46028d43f5fe50f062bcfc6ac75c12d4-1 {}] regionserver.HStore(327): Store=46028d43f5fe50f062bcfc6ac75c12d4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:25:22,203 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for f6f53daf25c27da05945d00784d9b96e 
2024-12-02T04:25:22,203 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,204 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,206 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,206 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,211 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:25:22,211 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:25:22,212 INFO 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 46028d43f5fe50f062bcfc6ac75c12d4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73340488, jitterRate=0.09285843372344971}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:25:22,212 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened f6f53daf25c27da05945d00784d9b96e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60376271, jitterRate=-0.10032345354557037}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:25:22,212 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,212 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,212 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 46028d43f5fe50f062bcfc6ac75c12d4: Running coprocessor pre-open hook at 1733113522198Writing region info on filesystem at 1733113522198Initializing all the Stores at 1733113522199 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113522199Cleaning up temporary data from old regions at 1733113522204 (+5 ms)Running coprocessor post-open hooks at 1733113522212 (+8 ms)Region opened successfully at 1733113522212 2024-12-02T04:25:22,212 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for f6f53daf25c27da05945d00784d9b96e: Running coprocessor pre-open hook at 1733113522198Writing region info on filesystem at 1733113522198Initializing all the Stores at 1733113522199 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113522199Cleaning up temporary data from old regions at 1733113522204 (+5 ms)Running coprocessor post-open hooks at 1733113522212 (+8 ms)Region opened successfully at 1733113522212 2024-12-02T04:25:22,213 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4., pid=117, masterSystemTime=1733113522193 2024-12-02T04:25:22,213 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] 
regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e., pid=118, masterSystemTime=1733113522195 2024-12-02T04:25:22,215 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:22,215 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:22,216 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=46028d43f5fe50f062bcfc6ac75c12d4, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:25:22,216 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:25:22,216 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:25:22,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=f6f53daf25c27da05945d00784d9b96e, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:25:22,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:25:22,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure f6f53daf25c27da05945d00784d9b96e, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:25:22,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-02T04:25:22,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4, server=e2eaa0f11f7e,34291,1733113342258 in 179 msec 2024-12-02T04:25:22,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-02T04:25:22,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, ASSIGN in 337 msec 2024-12-02T04:25:22,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure f6f53daf25c27da05945d00784d9b96e, server=e2eaa0f11f7e,41643,1733113342088 in 179 msec 2024-12-02T04:25:22,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-12-02T04:25:22,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, 
state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, ASSIGN in 339 msec 2024-12-02T04:25:22,228 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:25:22,228 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113522228"}]},"ts":"1733113522228"} 2024-12-02T04:25:22,230 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-02T04:25:22,232 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:25:22,232 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-02T04:25:22,236 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T04:25:22,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:22,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:22,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:22,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:25:22,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:25:22,308 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T04:25:22,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T04:25:22,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:25:22,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T04:25:22,316 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T04:25:22,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 520 msec 2024-12-02T04:25:22,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T04:25:22,428 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T04:25:22,428 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-02T04:25:22,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:22,431 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:25:22,433 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,439 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,447 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T04:25:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113522450 (current time:1733113522450). 
2024-12-02T04:25:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:25:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-02T04:25:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:25:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@740b9af2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:22,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:22,455 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:22,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:22,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:22,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62028d90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:22,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:22,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,457 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39132, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:22,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ddaaa7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:22,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:22,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:22,461 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:22,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:25:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,463 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:25:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ee32a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:22,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:22,465 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:22,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:22,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:22,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@294ad13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:22,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:22,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,467 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39156, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:22,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6875cfdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:22,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:22,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:22,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:22,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:25:22,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:22,474 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:22,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T04:25:22,475 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:25:22,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:25:22,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T04:25:22,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-02T04:25:22,478 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:25:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T04:25:22,479 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:25:22,482 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:25:22,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742096_1272 (size=161) 2024-12-02T04:25:22,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742096_1272 (size=161) 2024-12-02T04:25:22,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742096_1272 (size=161) 2024-12-02T04:25:22,492 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:25:22,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e}] 2024-12-02T04:25:22,494 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,494 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T04:25:22,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-02T04:25:22,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-02T04:25:22,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:22,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 46028d43f5fe50f062bcfc6ac75c12d4: 2024-12-02T04:25:22,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. for emptySnaptb0-testConsecutiveExports completed. 2024-12-02T04:25:22,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-02T04:25:22,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:22,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:25:22,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
2024-12-02T04:25:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for f6f53daf25c27da05945d00784d9b96e: 2024-12-02T04:25:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. for emptySnaptb0-testConsecutiveExports completed. 2024-12-02T04:25:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-02T04:25:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:25:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742097_1273 (size=68) 2024-12-02T04:25:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742097_1273 (size=68) 2024-12-02T04:25:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742097_1273 (size=68) 2024-12-02T04:25:22,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 
2024-12-02T04:25:22,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-02T04:25:22,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-02T04:25:22,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,685 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:22,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4 in 194 msec 2024-12-02T04:25:22,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742098_1274 (size=68) 2024-12-02T04:25:22,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742098_1274 (size=68) 2024-12-02T04:25:22,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742098_1274 (size=68) 2024-12-02T04:25:22,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
2024-12-02T04:25:22,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-02T04:25:22,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-02T04:25:22,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,692 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:22,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-02T04:25:22,698 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:25:22,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e in 202 msec 2024-12-02T04:25:22,699 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:25:22,700 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:25:22,700 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:25:22,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:22,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:25:22,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742099_1275 (size=60) 2024-12-02T04:25:22,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742099_1275 (size=60) 2024-12-02T04:25:22,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742099_1275 (size=60) 2024-12-02T04:25:22,718 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:25:22,718 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-02T04:25:22,719 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-02T04:25:22,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742100_1276 (size=641) 2024-12-02T04:25:22,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742100_1276 (size=641) 2024-12-02T04:25:22,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742100_1276 (size=641) 2024-12-02T04:25:22,736 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:25:22,743 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:25:22,743 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-02T04:25:22,745 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:25:22,745 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-02T04:25:22,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 269 msec 2024-12-02T04:25:22,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T04:25:22,798 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T04:25:22,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:25:22,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:25:22,806 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-02T04:25:22,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 
2024-12-02T04:25:22,809 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:25:22,811 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,816 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,823 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T04:25:22,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113522825 (current time:1733113522825). 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b890a9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:22,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:22,827 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:22,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:22,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:22,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b7d094b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-02T04:25:22,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:22,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:22,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,829 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39180, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:22,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a05274b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:22,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:22,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:22,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:22,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:25:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,833 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:25:22,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e1c1e91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:25:22,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:25:22,835 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:25:22,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:25:22,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:25:22,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d134ba3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:25:22,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:25:22,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,837 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:25:22,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f03157, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:25:22,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:25:22,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:25:22,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:22,840 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:22,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:25:22,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:25:22,843 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42722, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:25:22,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:25:22,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:25:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:25:22,845 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:25:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T04:25:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T04:25:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T04:25:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-02T04:25:22,850 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:25:22,853 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:25:22,856 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:25:22,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T04:25:22,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742101_1277 (size=156) 2024-12-02T04:25:22,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742101_1277 (size=156) 2024-12-02T04:25:22,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742101_1277 (size=156) 2024-12-02T04:25:22,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T04:25:23,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T04:25:23,298 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:25:23,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e}] 2024-12-02T04:25:23,299 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:23,299 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:23,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-02T04:25:23,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-02T04:25:23,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:25:23,451 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing f6f53daf25c27da05945d00784d9b96e 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-02T04:25:23,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:25:23,452 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 46028d43f5fe50f062bcfc6ac75c12d4 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-02T04:25:23,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e is 71, key is 1018bcb5f7ff530ac051f5fda836ef89/cf:q/1733113522805/Put/seqid=0 2024-12-02T04:25:23,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4 is 71, key is 0168c1bdf58666d20d360e754141d037/cf:q/1733113522803/Put/seqid=0 2024-12-02T04:25:23,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742102_1278 (size=7891) 2024-12-02T04:25:23,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742102_1278 (size=7891) 2024-12-02T04:25:23,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742102_1278 (size=7891) 2024-12-02T04:25:23,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:23,483 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:23,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/.tmp/cf/0e1ca781b8eb4771bd1e1db281949b67, store: [table=testtb-testConsecutiveExports family=cf region=f6f53daf25c27da05945d00784d9b96e] 2024-12-02T04:25:23,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/.tmp/cf/0e1ca781b8eb4771bd1e1db281949b67 is 206, key is 13462bff2d00d66587eb9afe3ef49371d/cf:q/1733113522805/Put/seqid=0 2024-12-02T04:25:23,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742103_1279 (size=5381) 2024-12-02T04:25:23,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742103_1279 (size=5381) 2024-12-02T04:25:23,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742103_1279 (size=5381) 2024-12-02T04:25:23,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:23,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T04:25:23,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742104_1280 (size=14045) 2024-12-02T04:25:23,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742104_1280 (size=14045) 2024-12-02T04:25:23,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742104_1280 (size=14045) 2024-12-02T04:25:23,492 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/.tmp/cf/0e1ca781b8eb4771bd1e1db281949b67 
2024-12-02T04:25:23,492 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:23,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/.tmp/cf/943305628dac48bf9d4eeacc7f4e1906, store: [table=testtb-testConsecutiveExports family=cf region=46028d43f5fe50f062bcfc6ac75c12d4] 2024-12-02T04:25:23,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/.tmp/cf/943305628dac48bf9d4eeacc7f4e1906 is 206, key is 0586259965c05e48c0dcae02920c320d8/cf:q/1733113522803/Put/seqid=0 2024-12-02T04:25:23,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/.tmp/cf/0e1ca781b8eb4771bd1e1db281949b67 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf/0e1ca781b8eb4771bd1e1db281949b67 2024-12-02T04:25:23,501 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf/0e1ca781b8eb4771bd1e1db281949b67, entries=43, sequenceid=6, filesize=13.7 K 2024-12-02T04:25:23,502 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for f6f53daf25c27da05945d00784d9b96e in 51ms, sequenceid=6, compaction requested=false 2024-12-02T04:25:23,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-02T04:25:23,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for f6f53daf25c27da05945d00784d9b96e: 2024-12-02T04:25:23,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): 
Snapshotting region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. for snaptb0-testConsecutiveExports completed. 2024-12-02T04:25:23,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-02T04:25:23,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:23,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf/0e1ca781b8eb4771bd1e1db281949b67] hfiles 2024-12-02T04:25:23,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf/0e1ca781b8eb4771bd1e1db281949b67 for snapshot=snaptb0-testConsecutiveExports 2024-12-02T04:25:23,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742105_1281 (size=6720) 2024-12-02T04:25:23,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742105_1281 (size=6720) 2024-12-02T04:25:23,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742105_1281 (size=6720) 2024-12-02T04:25:23,504 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/.tmp/cf/943305628dac48bf9d4eeacc7f4e1906 2024-12-02T04:25:23,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742106_1282 (size=107) 2024-12-02T04:25:23,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742106_1282 (size=107) 2024-12-02T04:25:23,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742106_1282 (size=107) 2024-12-02T04:25:23,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
2024-12-02T04:25:23,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-02T04:25:23,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/.tmp/cf/943305628dac48bf9d4eeacc7f4e1906 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf/943305628dac48bf9d4eeacc7f4e1906 2024-12-02T04:25:23,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-02T04:25:23,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:23,511 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:23,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f6f53daf25c27da05945d00784d9b96e in 213 msec 2024-12-02T04:25:23,515 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf/943305628dac48bf9d4eeacc7f4e1906, entries=7, sequenceid=6, filesize=6.6 K 2024-12-02T04:25:23,516 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 46028d43f5fe50f062bcfc6ac75c12d4 in 64ms, sequenceid=6, compaction requested=false 2024-12-02T04:25:23,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 46028d43f5fe50f062bcfc6ac75c12d4: 2024-12-02T04:25:23,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. for snaptb0-testConsecutiveExports completed. 2024-12-02T04:25:23,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-02T04:25:23,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:25:23,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf/943305628dac48bf9d4eeacc7f4e1906] hfiles 2024-12-02T04:25:23,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf/943305628dac48bf9d4eeacc7f4e1906 for snapshot=snaptb0-testConsecutiveExports 2024-12-02T04:25:23,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742107_1283 (size=107) 2024-12-02T04:25:23,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742107_1283 (size=107) 2024-12-02T04:25:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742107_1283 (size=107) 2024-12-02T04:25:23,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 
2024-12-02T04:25:23,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-02T04:25:23,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-02T04:25:23,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:23,523 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:23,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-12-02T04:25:23,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:25:23,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4 in 225 msec 2024-12-02T04:25:23,526 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:25:23,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:25:23,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:25:23,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:25:23,528 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4] hfiles 2024-12-02T04:25:23,528 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:25:23,528 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:25:23,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742108_1284 (size=291) 2024-12-02T04:25:23,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742108_1284 (size=291) 2024-12-02T04:25:23,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742108_1284 (size=291) 2024-12-02T04:25:23,537 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:25:23,537 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-02T04:25:23,538 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T04:25:23,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742109_1285 (size=951) 2024-12-02T04:25:23,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742109_1285 (size=951) 2024-12-02T04:25:23,545 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742109_1285 (size=951) 2024-12-02T04:25:23,547 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:25:23,552 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:25:23,552 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T04:25:23,554 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:25:23,554 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-02T04:25:23,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 708 msec 2024-12-02T04:25:23,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T04:25:23,997 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T04:25:23,998 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997 2024-12-02T04:25:23,998 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:24,023 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 
2024-12-02T04:25:24,023 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@530d277f, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T04:25:24,025 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:25:24,038 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T04:25:24,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,876 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-5724170181751109869.jar 2024-12-02T04:25:24,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-8456641719950201659.jar 2024-12-02T04:25:24,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,946 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:24,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:25:24,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:25:24,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:25:24,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:25:24,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:25:24,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:25:24,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:25:24,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:25:24,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:25:24,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:25:24,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:25:24,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:24,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:24,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:24,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:24,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:24,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:24,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:25,005 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742110_1286 (size=131440) 2024-12-02T04:25:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742110_1286 (size=131440) 2024-12-02T04:25:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742110_1286 (size=131440) 2024-12-02T04:25:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742111_1287 (size=4188619) 2024-12-02T04:25:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742111_1287 (size=4188619) 2024-12-02T04:25:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742111_1287 (size=4188619) 2024-12-02T04:25:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742112_1288 (size=1323991) 2024-12-02T04:25:25,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742112_1288 (size=1323991) 2024-12-02T04:25:25,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742112_1288 (size=1323991) 2024-12-02T04:25:25,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742113_1289 (size=903927) 2024-12-02T04:25:25,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742113_1289 (size=903927) 2024-12-02T04:25:25,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742113_1289 (size=903927) 2024-12-02T04:25:25,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742114_1290 (size=8360083) 2024-12-02T04:25:25,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742114_1290 (size=8360083) 2024-12-02T04:25:25,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742114_1290 (size=8360083) 2024-12-02T04:25:25,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742115_1291 (size=440957) 2024-12-02T04:25:25,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742115_1291 (size=440957) 2024-12-02T04:25:25,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742115_1291 (size=440957) 2024-12-02T04:25:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742116_1292 (size=1877034) 2024-12-02T04:25:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742116_1292 (size=1877034) 2024-12-02T04:25:25,091 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742116_1292 (size=1877034) 2024-12-02T04:25:25,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742117_1293 (size=77835) 2024-12-02T04:25:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742117_1293 (size=77835) 2024-12-02T04:25:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742117_1293 (size=77835) 2024-12-02T04:25:25,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742118_1294 (size=30949) 2024-12-02T04:25:25,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742118_1294 (size=30949) 2024-12-02T04:25:25,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742118_1294 (size=30949) 2024-12-02T04:25:25,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742119_1295 (size=1597213) 2024-12-02T04:25:25,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742119_1295 (size=1597213) 2024-12-02T04:25:25,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742119_1295 (size=1597213) 2024-12-02T04:25:25,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742120_1296 (size=4695811) 2024-12-02T04:25:25,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742120_1296 (size=4695811) 2024-12-02T04:25:25,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742120_1296 (size=4695811) 2024-12-02T04:25:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742121_1297 (size=232957) 2024-12-02T04:25:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742121_1297 (size=232957) 2024-12-02T04:25:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742121_1297 (size=232957) 2024-12-02T04:25:25,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742122_1298 (size=127628) 2024-12-02T04:25:25,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742122_1298 (size=127628) 2024-12-02T04:25:25,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742122_1298 (size=127628) 2024-12-02T04:25:25,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742123_1299 (size=20406) 2024-12-02T04:25:25,204 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742123_1299 (size=20406) 2024-12-02T04:25:25,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742123_1299 (size=20406) 2024-12-02T04:25:25,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742124_1300 (size=5175431) 2024-12-02T04:25:25,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742124_1300 (size=5175431) 2024-12-02T04:25:25,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742124_1300 (size=5175431) 2024-12-02T04:25:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742125_1301 (size=6424732) 2024-12-02T04:25:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742125_1301 (size=6424732) 2024-12-02T04:25:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742125_1301 (size=6424732) 2024-12-02T04:25:25,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742126_1302 (size=217634) 2024-12-02T04:25:25,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742126_1302 (size=217634) 2024-12-02T04:25:25,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742126_1302 (size=217634) 2024-12-02T04:25:25,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742127_1303 (size=1832290) 2024-12-02T04:25:25,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742127_1303 (size=1832290) 2024-12-02T04:25:25,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742127_1303 (size=1832290) 2024-12-02T04:25:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742128_1304 (size=322274) 2024-12-02T04:25:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742128_1304 (size=322274) 2024-12-02T04:25:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742128_1304 (size=322274) 2024-12-02T04:25:25,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742129_1305 (size=503880) 2024-12-02T04:25:25,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742129_1305 (size=503880) 2024-12-02T04:25:25,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742129_1305 (size=503880) 
2024-12-02T04:25:25,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742130_1306 (size=29229) 2024-12-02T04:25:25,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742130_1306 (size=29229) 2024-12-02T04:25:25,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742130_1306 (size=29229) 2024-12-02T04:25:25,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742131_1307 (size=24096) 2024-12-02T04:25:25,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742131_1307 (size=24096) 2024-12-02T04:25:25,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742131_1307 (size=24096) 2024-12-02T04:25:25,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742132_1308 (size=111872) 2024-12-02T04:25:25,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742132_1308 (size=111872) 2024-12-02T04:25:25,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742132_1308 (size=111872) 2024-12-02T04:25:25,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742133_1309 (size=45609) 2024-12-02T04:25:25,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742133_1309 (size=45609) 2024-12-02T04:25:25,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742133_1309 (size=45609) 2024-12-02T04:25:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742134_1310 (size=136454) 2024-12-02T04:25:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742134_1310 (size=136454) 2024-12-02T04:25:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742134_1310 (size=136454) 2024-12-02T04:25:26,093 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0004_000001 (auth:SIMPLE) from 127.0.0.1:57944 2024-12-02T04:25:26,102 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000001/launch_container.sh] 2024-12-02T04:25:26,102 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000001/container_tokens] 2024-12-02T04:25:26,102 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0004/container_1733113348655_0004_01_000001/sysfs] 2024-12-02T04:25:26,185 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T04:25:26,187 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-02T04:25:26,189 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.7 K 2024-12-02T04:25:26,189 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.7 K 2024-12-02T04:25:26,189 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.6 K 2024-12-02T04:25:26,189 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.3 K 2024-12-02T04:25:26,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742135_1311 (size=1023) 2024-12-02T04:25:26,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742135_1311 (size=1023) 2024-12-02T04:25:26,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742135_1311 (size=1023) 2024-12-02T04:25:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742136_1312 (size=35) 2024-12-02T04:25:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742136_1312 (size=35) 2024-12-02T04:25:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742136_1312 (size=35) 2024-12-02T04:25:26,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742137_1313 (size=304041) 2024-12-02T04:25:26,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742137_1313 (size=304041) 2024-12-02T04:25:26,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742137_1313 (size=304041) 2024-12-02T04:25:26,254 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:25:26,254 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:25:26,585 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 46028d43f5fe50f062bcfc6ac75c12d4 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:25:26,585 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f6f53daf25c27da05945d00784d9b96e changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:25:26,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:36606 2024-12-02T04:25:27,011 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:25:31,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-02T04:25:31,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-02T04:25:32,219 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:34954 2024-12-02T04:25:32,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742138_1314 (size=349739) 2024-12-02T04:25:32,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742138_1314 (size=349739) 2024-12-02T04:25:32,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742138_1314 (size=349739) 2024-12-02T04:25:34,457 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:46222 2024-12-02T04:25:34,463 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:36812 2024-12-02T04:25:35,334 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:46230 2024-12-02T04:25:35,337 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:36818 2024-12-02T04:25:38,095 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:25:43,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000003/launch_container.sh] 2024-12-02T04:25:43,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000003/container_tokens] 2024-12-02T04:25:43,259 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000003/sysfs] 2024-12-02T04:25:43,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742139_1315 (size=31808) 2024-12-02T04:25:43,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742139_1315 (size=31808) 2024-12-02T04:25:43,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742139_1315 (size=31808) 2024-12-02T04:25:43,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742140_1316 (size=463) 2024-12-02T04:25:43,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742140_1316 (size=463) 2024-12-02T04:25:43,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742140_1316 (size=463) 2024-12-02T04:25:43,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742141_1317 (size=31808) 2024-12-02T04:25:43,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742141_1317 (size=31808) 2024-12-02T04:25:43,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742141_1317 (size=31808) 2024-12-02T04:25:43,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742142_1318 (size=349739) 2024-12-02T04:25:43,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742142_1318 (size=349739) 2024-12-02T04:25:43,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742142_1318 (size=349739) 2024-12-02T04:25:43,391 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000005/launch_container.sh] 2024-12-02T04:25:43,391 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000005/container_tokens] 2024-12-02T04:25:43,391 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000005/sysfs] 2024-12-02T04:25:43,811 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:49894 2024-12-02T04:25:45,474 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000002/launch_container.sh] 2024-12-02T04:25:45,474 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000002/container_tokens] 2024-12-02T04:25:45,474 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000002/sysfs] 2024-12-02T04:25:45,510 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:25:45,510 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-02T04:25:45,515 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-02T04:25:45,515 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:25:45,515 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:25:45,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T04:25:45,520 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T04:25:45,520 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T04:25:45,520 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@530d277f in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T04:25:45,520 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T04:25:45,520 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T04:25:45,522 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:45,562 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:25:45,562 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@530d277f, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T04:25:45,564 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:25:45,573 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T04:25:45,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:45,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:45,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-6270477886610394550.jar 2024-12-02T04:25:46,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,530 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-14575672174759022013.jar 2024-12-02T04:25:46,530 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,530 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:25:46,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:25:46,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:25:46,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:25:46,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:25:46,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:25:46,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:25:46,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:25:46,533 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:25:46,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:25:46,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:25:46,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:25:46,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:46,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:46,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:46,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:46,535 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:25:46,535 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:46,535 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:25:46,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742143_1319 (size=131440) 2024-12-02T04:25:46,585 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742143_1319 (size=131440) 2024-12-02T04:25:46,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742143_1319 (size=131440) 2024-12-02T04:25:46,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742144_1320 (size=4188619) 2024-12-02T04:25:46,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742144_1320 (size=4188619) 2024-12-02T04:25:46,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742144_1320 (size=4188619) 2024-12-02T04:25:46,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742145_1321 (size=1323991) 2024-12-02T04:25:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742145_1321 (size=1323991) 2024-12-02T04:25:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742145_1321 (size=1323991) 2024-12-02T04:25:46,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742146_1322 (size=903927) 2024-12-02T04:25:46,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742146_1322 (size=903927) 2024-12-02T04:25:46,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742146_1322 (size=903927) 2024-12-02T04:25:46,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742147_1323 (size=8360083) 2024-12-02T04:25:46,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742147_1323 (size=8360083) 2024-12-02T04:25:46,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742147_1323 (size=8360083) 2024-12-02T04:25:46,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742148_1324 (size=1877034) 2024-12-02T04:25:46,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742148_1324 (size=1877034) 2024-12-02T04:25:46,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742148_1324 (size=1877034) 2024-12-02T04:25:46,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742149_1325 (size=77835) 2024-12-02T04:25:46,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742149_1325 (size=77835) 2024-12-02T04:25:46,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742149_1325 (size=77835) 2024-12-02T04:25:46,665 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742150_1326 (size=30949) 2024-12-02T04:25:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742150_1326 (size=30949) 2024-12-02T04:25:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742150_1326 (size=30949) 2024-12-02T04:25:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742151_1327 (size=1597213) 2024-12-02T04:25:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742151_1327 (size=1597213) 2024-12-02T04:25:46,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742151_1327 (size=1597213) 2024-12-02T04:25:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742152_1328 (size=440957) 2024-12-02T04:25:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742152_1328 (size=440957) 2024-12-02T04:25:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742152_1328 (size=440957) 2024-12-02T04:25:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742153_1329 (size=6424732) 2024-12-02T04:25:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742153_1329 (size=6424732) 2024-12-02T04:25:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742153_1329 (size=6424732) 2024-12-02T04:25:46,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742154_1330 (size=4695811) 2024-12-02T04:25:46,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742154_1330 (size=4695811) 2024-12-02T04:25:46,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742154_1330 (size=4695811) 2024-12-02T04:25:46,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742155_1331 (size=232957) 2024-12-02T04:25:46,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742155_1331 (size=232957) 2024-12-02T04:25:46,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742155_1331 (size=232957) 2024-12-02T04:25:46,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742156_1332 (size=127628) 2024-12-02T04:25:46,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742156_1332 (size=127628) 2024-12-02T04:25:46,731 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742156_1332 (size=127628) 2024-12-02T04:25:46,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742157_1333 (size=20406) 2024-12-02T04:25:46,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742157_1333 (size=20406) 2024-12-02T04:25:46,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742157_1333 (size=20406) 2024-12-02T04:25:46,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742158_1334 (size=5175431) 2024-12-02T04:25:46,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742158_1334 (size=5175431) 2024-12-02T04:25:46,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742158_1334 (size=5175431) 2024-12-02T04:25:46,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742159_1335 (size=217634) 2024-12-02T04:25:46,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742159_1335 (size=217634) 2024-12-02T04:25:46,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742159_1335 (size=217634) 2024-12-02T04:25:46,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742160_1336 (size=1832290) 2024-12-02T04:25:46,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742160_1336 (size=1832290) 2024-12-02T04:25:46,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742160_1336 (size=1832290) 2024-12-02T04:25:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742161_1337 (size=322274) 2024-12-02T04:25:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742161_1337 (size=322274) 2024-12-02T04:25:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742161_1337 (size=322274) 2024-12-02T04:25:46,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742162_1338 (size=503880) 2024-12-02T04:25:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742162_1338 (size=503880) 2024-12-02T04:25:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742162_1338 (size=503880) 2024-12-02T04:25:46,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742163_1339 (size=29229) 
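The repeated "addStoredBlock" entries around this point show every written block being acknowledged by three distinct datanodes, which matches a replication factor of 3 on the test's mini DFS cluster. A small, self-contained helper (an assumption-based illustration, not part of Hadoop) showing how such lines can be tallied to confirm the replica count per block:

import java.util.*;
import java.util.regex.*;

// Hypothetical helper: count distinct datanodes per block from
// "BLOCK* addStoredBlock: <host:port> is added to <blockId> (size=...)" lines.
public class BlockReportTally {
    private static final Pattern LINE = Pattern.compile(
        "addStoredBlock: (\\S+) is added to (blk_\\d+_\\d+) \\(size=(\\d+)\\)");

    public static void main(String[] args) {
        List<String> sample = List.of(
            "BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742143_1319 (size=131440)",
            "BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742143_1319 (size=131440)",
            "BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742143_1319 (size=131440)");
        Map<String, Set<String>> replicas = new TreeMap<>();
        for (String line : sample) {
            Matcher m = LINE.matcher(line);
            if (m.find()) {
                replicas.computeIfAbsent(m.group(2), k -> new TreeSet<>()).add(m.group(1));
            }
        }
        replicas.forEach((block, nodes) ->
            System.out.println(block + " reported by " + nodes.size() + " datanodes: " + nodes));
    }
}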
2024-12-02T04:25:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742163_1339 (size=29229) 2024-12-02T04:25:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742163_1339 (size=29229) 2024-12-02T04:25:46,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742164_1340 (size=24096) 2024-12-02T04:25:46,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742164_1340 (size=24096) 2024-12-02T04:25:46,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742164_1340 (size=24096) 2024-12-02T04:25:46,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742165_1341 (size=111872) 2024-12-02T04:25:46,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742165_1341 (size=111872) 2024-12-02T04:25:46,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742165_1341 (size=111872) 2024-12-02T04:25:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742166_1342 (size=45609) 2024-12-02T04:25:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742166_1342 (size=45609) 2024-12-02T04:25:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742166_1342 (size=45609) 2024-12-02T04:25:46,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742167_1343 (size=136454) 2024-12-02T04:25:46,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742167_1343 (size=136454) 2024-12-02T04:25:46,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742167_1343 (size=136454) 2024-12-02T04:25:46,824 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
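The earlier "For class ..., using jar ..." entries record the test resolving, for each dependency class, the jar that provides it so those jars can be shipped with the MapReduce job; the warning just above notes that no explicit job jar was set. The underlying idea can be sketched with plain JDK calls (HBase's TableMapReduceUtil does more than this, e.g. it builds temporary jars for classes loaded from directories, which is where the hadoop-*.jar files under test-data come from, so treat this as an illustration rather than its implementation):

import java.net.URL;
import java.security.CodeSource;

// Sketch of the idea only: ask the JVM where a class was loaded from.
public class WhereIsMyJar {
    static String locationOf(Class<?> clazz) {
        CodeSource src = clazz.getProtectionDomain().getCodeSource();
        URL url = (src == null) ? null : src.getLocation();
        return clazz.getName() + " -> " + (url == null ? "<bootstrap/unknown>" : url);
    }

    public static void main(String[] args) {
        System.out.println(locationOf(String.class));        // usually bootstrap, no location
        System.out.println(locationOf(WhereIsMyJar.class));   // the directory or jar it ran from
    }
}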
2024-12-02T04:25:46,826 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-02T04:25:46,827 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.7 K 2024-12-02T04:25:46,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.7 K 2024-12-02T04:25:46,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.6 K 2024-12-02T04:25:46,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.3 K 2024-12-02T04:25:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742168_1344 (size=1023) 2024-12-02T04:25:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742168_1344 (size=1023) 2024-12-02T04:25:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742168_1344 (size=1023) 2024-12-02T04:25:46,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742169_1345 (size=35) 2024-12-02T04:25:46,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742169_1345 (size=35) 2024-12-02T04:25:46,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742169_1345 (size=35) 2024-12-02T04:25:46,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742170_1346 (size=304043) 2024-12-02T04:25:46,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742170_1346 (size=304043) 2024-12-02T04:25:46,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742170_1346 (size=304043) 2024-12-02T04:25:47,450 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000004/launch_container.sh] 2024-12-02T04:25:47,450 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000004/container_tokens] 2024-12-02T04:25:47,450 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000004/sysfs] 2024-12-02T04:25:49,898 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): 
maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:25:49,898 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:25:49,901 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0005_000001 (auth:SIMPLE) from 127.0.0.1:37426 2024-12-02T04:25:49,911 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000001/launch_container.sh] 2024-12-02T04:25:49,911 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000001/container_tokens] 2024-12-02T04:25:49,911 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0005/container_1733113348655_0005_01_000001/sysfs] 2024-12-02T04:25:50,366 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:49906 2024-12-02T04:25:50,563 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
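The "export split=N size=..." entries a little earlier show the snapshot's hfiles being grouped into mapper splits of roughly balanced total size. A self-contained greedy sketch of that kind of balancing, using made-up file sizes (the real ExportSnapshot planning logic may differ in detail):

import java.util.*;

// Greedy size balancing: place each file (largest first) into the currently smallest group.
public class SplitBalancer {
    public static void main(String[] args) {
        long[] fileSizes = {8000, 6000, 5300, 4000, 3500, 3000, 2000, 1200}; // made-up sizes
        int groups = 4;
        long[] totals = new long[groups];
        List<List<Long>> split = new ArrayList<>();
        for (int i = 0; i < groups; i++) split.add(new ArrayList<>());

        long[] sorted = Arrays.stream(fileSizes).boxed()
                .sorted(Comparator.reverseOrder())
                .mapToLong(Long::longValue).toArray();
        for (long size : sorted) {
            int smallest = 0;
            for (int i = 1; i < groups; i++) if (totals[i] < totals[smallest]) smallest = i;
            totals[smallest] += size;
            split.get(smallest).add(size);
        }
        for (int i = 0; i < groups; i++) {
            System.out.println("split=" + i + " size=" + totals[i] + " files=" + split.get(i));
        }
    }
}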
2024-12-02T04:25:54,878 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:41276 2024-12-02T04:25:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742171_1347 (size=349741) 2024-12-02T04:25:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742171_1347 (size=349741) 2024-12-02T04:25:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742171_1347 (size=349741) 2024-12-02T04:25:57,163 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:50874 2024-12-02T04:25:57,163 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:34616 2024-12-02T04:25:57,983 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:34624 2024-12-02T04:25:57,991 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:50888 2024-12-02T04:26:00,904 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:26:03,184 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000002/launch_container.sh] 2024-12-02T04:26:03,184 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000002/container_tokens] 2024-12-02T04:26:03,184 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000002/sysfs] 2024-12-02T04:26:06,175 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000004/launch_container.sh] 2024-12-02T04:26:06,175 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000004/container_tokens] 2024-12-02T04:26:06,175 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000004/sysfs] 2024-12-02T04:26:06,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742172_1348 (size=29739) 2024-12-02T04:26:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742172_1348 (size=29739) 2024-12-02T04:26:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742172_1348 (size=29739) 2024-12-02T04:26:06,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742173_1349 (size=463) 2024-12-02T04:26:06,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742173_1349 (size=463) 2024-12-02T04:26:06,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742173_1349 (size=463) 2024-12-02T04:26:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742174_1350 (size=29739) 2024-12-02T04:26:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742174_1350 (size=29739) 2024-12-02T04:26:06,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742174_1350 (size=29739) 2024-12-02T04:26:06,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742175_1351 (size=349741) 2024-12-02T04:26:06,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742175_1351 (size=349741) 2024-12-02T04:26:06,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742175_1351 (size=349741) 2024-12-02T04:26:06,974 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:41266 2024-12-02T04:26:06,982 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:47188 2024-12-02T04:26:06,988 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:41282 2024-12-02T04:26:07,198 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 46028d43f5fe50f062bcfc6ac75c12d4, had cached 
0 bytes from a total of 6720 2024-12-02T04:26:07,198 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f6f53daf25c27da05945d00784d9b96e, had cached 0 bytes from a total of 14045 2024-12-02T04:26:08,314 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:26:08,315 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T04:26:08,343 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-02T04:26:08,343 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:26:08,344 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:26:08,344 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T04:26:08,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T04:26:08,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T04:26:08,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@530d277f in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T04:26:08,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T04:26:08,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113523997/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T04:26:08,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-02T04:26:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=125 2024-12-02T04:26:08,408 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113568407"}]},"ts":"1733113568407"} 2024-12-02T04:26:08,410 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-02T04:26:08,410 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-02T04:26:08,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-02T04:26:08,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, UNASSIGN}] 2024-12-02T04:26:08,417 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, UNASSIGN 2024-12-02T04:26:08,417 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, UNASSIGN 2024-12-02T04:26:08,419 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=f6f53daf25c27da05945d00784d9b96e, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:08,419 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=46028d43f5fe50f062bcfc6ac75c12d4, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:26:08,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, UNASSIGN because future has completed 2024-12-02T04:26:08,422 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:08,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:26:08,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, UNASSIGN because future has completed 2024-12-02T04:26:08,424 DEBUG [PEWorker-1 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:08,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure f6f53daf25c27da05945d00784d9b96e, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T04:26:08,579 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:26:08,579 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:08,579 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 46028d43f5fe50f062bcfc6ac75c12d4, disabling compactions & flushes 2024-12-02T04:26:08,579 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:26:08,579 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:26:08,579 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. after waiting 0 ms 2024-12-02T04:26:08,579 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:26:08,580 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:26:08,580 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:08,580 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing f6f53daf25c27da05945d00784d9b96e, disabling compactions & flushes 2024-12-02T04:26:08,580 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:26:08,580 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:26:08,580 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
after waiting 0 ms 2024-12-02T04:26:08,580 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 2024-12-02T04:26:08,651 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:08,651 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:08,654 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:08,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:08,655 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4. 2024-12-02T04:26:08,655 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e. 
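While the master runs the DisableTableProcedure and its child region-close procedures above, the client side only polls, which is what the repeated "Checking to see if procedure is done pid=125" entries reflect. A minimal sketch of that poll-with-backoff pattern, where isDone is a hypothetical callback rather than an actual HBase client API:

import java.util.concurrent.TimeUnit;
import java.util.function.LongPredicate;

// Poll until the (simulated) master reports the stored procedure as finished.
public class ProcedurePoller {
    static void waitForProcedure(long pid, LongPredicate isDone, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        long pauseMs = 100;
        while (!isDone.test(pid)) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException("procedure pid=" + pid + " not done in time");
            }
            TimeUnit.MILLISECONDS.sleep(pauseMs);
            pauseMs = Math.min(pauseMs * 2, 5_000); // exponential backoff, capped
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        // Simulated master: the procedure "completes" after ~300 ms.
        waitForProcedure(125, pid -> System.currentTimeMillis() - start > 300, 10_000);
        System.out.println("pid=125 done");
    }
}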
2024-12-02T04:26:08,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 46028d43f5fe50f062bcfc6ac75c12d4: Waiting for close lock at 1733113568579Running coprocessor pre-close hooks at 1733113568579Disabling compacts and flushes for region at 1733113568579Disabling writes for close at 1733113568579Writing region close event to WAL at 1733113568619 (+40 ms)Running coprocessor post-close hooks at 1733113568654 (+35 ms)Closed at 1733113568655 (+1 ms) 2024-12-02T04:26:08,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for f6f53daf25c27da05945d00784d9b96e: Waiting for close lock at 1733113568580Running coprocessor pre-close hooks at 1733113568580Disabling compacts and flushes for region at 1733113568580Disabling writes for close at 1733113568580Writing region close event to WAL at 1733113568619 (+39 ms)Running coprocessor post-close hooks at 1733113568655 (+36 ms)Closed at 1733113568655 2024-12-02T04:26:08,659 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=f6f53daf25c27da05945d00784d9b96e, regionState=CLOSED 2024-12-02T04:26:08,660 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:26:08,661 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:26:08,662 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=46028d43f5fe50f062bcfc6ac75c12d4, regionState=CLOSED 2024-12-02T04:26:08,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure f6f53daf25c27da05945d00784d9b96e, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:08,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:26:08,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-02T04:26:08,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure f6f53daf25c27da05945d00784d9b96e, server=e2eaa0f11f7e,41643,1733113342088 in 240 msec 2024-12-02T04:26:08,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-02T04:26:08,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6f53daf25c27da05945d00784d9b96e, UNASSIGN in 252 msec 2024-12-02T04:26:08,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 46028d43f5fe50f062bcfc6ac75c12d4, server=e2eaa0f11f7e,34291,1733113342258 in 244 msec 2024-12-02T04:26:08,669 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=126 2024-12-02T04:26:08,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=46028d43f5fe50f062bcfc6ac75c12d4, UNASSIGN in 254 msec 2024-12-02T04:26:08,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-02T04:26:08,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 258 msec 2024-12-02T04:26:08,674 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113568673"}]},"ts":"1733113568673"} 2024-12-02T04:26:08,675 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-02T04:26:08,675 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-02T04:26:08,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 275 msec 2024-12-02T04:26:08,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T04:26:08,728 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T04:26:08,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-02T04:26:08,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,731 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-02T04:26:08,736 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,738 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-02T04:26:08,740 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:26:08,740 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:26:08,742 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/recovered.edits] 2024-12-02T04:26:08,745 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/recovered.edits] 2024-12-02T04:26:08,748 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf/943305628dac48bf9d4eeacc7f4e1906 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/cf/943305628dac48bf9d4eeacc7f4e1906 2024-12-02T04:26:08,748 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf/0e1ca781b8eb4771bd1e1db281949b67 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/cf/0e1ca781b8eb4771bd1e1db281949b67 2024-12-02T04:26:08,751 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4/recovered.edits/9.seqid 2024-12-02T04:26:08,751 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e/recovered.edits/9.seqid 2024-12-02T04:26:08,752 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:26:08,754 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testConsecutiveExports/46028d43f5fe50f062bcfc6ac75c12d4 
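The HFileArchiver entries around this point move each store file and recovered.edits file into an archive tree that mirrors the original layout before the region directory is deleted. As a general-idea sketch only (plain java.nio.file, not HBase's HFileArchiver; the paths are placeholder arguments):

import java.io.IOException;
import java.nio.file.*;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Move every file under `src` to the same relative path under `archiveRoot`,
// then remove the emptied source tree.
public class ArchiveThenDelete {
    static void archive(Path src, Path archiveRoot) throws IOException {
        List<Path> files;
        try (Stream<Path> walk = Files.walk(src)) {
            files = walk.filter(Files::isRegularFile).collect(Collectors.toList());
        }
        for (Path p : files) {
            Path target = archiveRoot.resolve(src.relativize(p));
            Files.createDirectories(target.getParent());
            Files.move(p, target, StandardCopyOption.REPLACE_EXISTING);
        }
        List<Path> leftovers;
        try (Stream<Path> walk = Files.walk(src)) {
            leftovers = walk.sorted(Comparator.reverseOrder()).collect(Collectors.toList());
        }
        for (Path p : leftovers) Files.delete(p);   // children first, then the directory itself
    }

    public static void main(String[] args) throws IOException {
        archive(Paths.get(args[0]), Paths.get(args[1])); // placeholder arguments
    }
}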
2024-12-02T04:26:08,755 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-02T04:26:08,755 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-02T04:26:08,756 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-02T04:26:08,765 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241202bb18f0e602b6481ca9aac45ecae886fa_f6f53daf25c27da05945d00784d9b96e 2024-12-02T04:26:08,767 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412020cac2e26f32b412db8641f0e91638f7a_46028d43f5fe50f062bcfc6ac75c12d4 2024-12-02T04:26:08,768 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-02T04:26:08,770 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,774 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-02T04:26:08,776 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-02T04:26:08,777 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,777 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
2024-12-02T04:26:08,778 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113568777"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:08,778 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113568777"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:08,780 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:26:08,780 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 46028d43f5fe50f062bcfc6ac75c12d4, NAME => 'testtb-testConsecutiveExports,,1733113521793.46028d43f5fe50f062bcfc6ac75c12d4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f6f53daf25c27da05945d00784d9b96e, NAME => 'testtb-testConsecutiveExports,1,1733113521793.f6f53daf25c27da05945d00784d9b96e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:26:08,780 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-02T04:26:08,781 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113568780"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:08,783 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-02T04:26:08,784 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T04:26:08,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 56 msec 2024-12-02T04:26:08,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-02T04:26:08,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testConsecutiveExports with data PBUF 2024-12-02T04:26:08,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-02T04:26:08,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-02T04:26:08,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:08,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:08,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:08,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T04:26:08,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-02T04:26:08,809 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-02T04:26:08,809 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T04:26:08,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-02T04:26:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-02T04:26:08,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-02T04:26:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-02T04:26:08,843 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=811 (was 802) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2033196956_1 at /127.0.0.1:60924 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 140159) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:59816 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:60948 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:42125 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2033196956_1 at /127.0.0.1:60772 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5392 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=799 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=904 (was 824) - SystemLoadAverage LEAK? 
-, ProcessCount=20 (was 23), AvailableMemoryMB=2568 (was 2829) 2024-12-02T04:26:08,843 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-02T04:26:08,863 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=811, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=904, ProcessCount=20, AvailableMemoryMB=2571 2024-12-02T04:26:08,863 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-02T04:26:08,865 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:26:08,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:08,867 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:26:08,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-02T04:26:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T04:26:08,868 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:26:08,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742176_1352 (size=458) 2024-12-02T04:26:08,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742176_1352 (size=458) 2024-12-02T04:26:08,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742176_1352 (size=458) 2024-12-02T04:26:08,881 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c55f93e6a1eafc37c98afe2be6853b3f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:08,883 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 87d741e5cfd316e6fc89dfc072993c57, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:08,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742177_1353 (size=83) 2024-12-02T04:26:08,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742177_1353 (size=83) 2024-12-02T04:26:08,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742177_1353 (size=83) 2024-12-02T04:26:08,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:08,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing c55f93e6a1eafc37c98afe2be6853b3f, disabling compactions & flushes 2024-12-02T04:26:08,907 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:08,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:08,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. after waiting 0 ms 2024-12-02T04:26:08,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
2024-12-02T04:26:08,907 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:08,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for c55f93e6a1eafc37c98afe2be6853b3f: Waiting for close lock at 1733113568907Disabling compacts and flushes for region at 1733113568907Disabling writes for close at 1733113568907Writing region close event to WAL at 1733113568907Closed at 1733113568907 2024-12-02T04:26:08,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742178_1354 (size=83) 2024-12-02T04:26:08,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742178_1354 (size=83) 2024-12-02T04:26:08,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742178_1354 (size=83) 2024-12-02T04:26:08,909 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:08,909 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 87d741e5cfd316e6fc89dfc072993c57, disabling compactions & flushes 2024-12-02T04:26:08,909 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:08,910 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:08,910 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. after waiting 0 ms 2024-12-02T04:26:08,910 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:08,910 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 
2024-12-02T04:26:08,910 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 87d741e5cfd316e6fc89dfc072993c57: Waiting for close lock at 1733113568909Disabling compacts and flushes for region at 1733113568909Disabling writes for close at 1733113568910 (+1 ms)Writing region close event to WAL at 1733113568910Closed at 1733113568910 2024-12-02T04:26:08,911 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:26:08,911 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733113568911"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113568911"}]},"ts":"1733113568911"} 2024-12-02T04:26:08,912 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733113568911"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113568911"}]},"ts":"1733113568911"} 2024-12-02T04:26:08,915 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:26:08,919 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:26:08,919 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113568919"}]},"ts":"1733113568919"} 2024-12-02T04:26:08,922 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-02T04:26:08,922 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:26:08,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:26:08,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:26:08,924 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:26:08,924 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:26:08,924 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:26:08,924 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:26:08,924 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:26:08,924 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:26:08,924 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:26:08,924 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:26:08,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, ASSIGN}] 2024-12-02T04:26:08,926 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, ASSIGN 2024-12-02T04:26:08,926 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, ASSIGN 2024-12-02T04:26:08,927 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:26:08,931 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:26:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T04:26:09,078 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T04:26:09,079 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=87d741e5cfd316e6fc89dfc072993c57, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:09,080 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=c55f93e6a1eafc37c98afe2be6853b3f, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:09,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, ASSIGN because future has completed 2024-12-02T04:26:09,083 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87d741e5cfd316e6fc89dfc072993c57, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:09,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, ASSIGN because future has completed 2024-12-02T04:26:09,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:09,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T04:26:09,243 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:09,244 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 87d741e5cfd316e6fc89dfc072993c57, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:26:09,244 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. service=AccessControlService 2024-12-02T04:26:09,244 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:26:09,245 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,245 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:09,245 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,245 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,248 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:09,248 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => c55f93e6a1eafc37c98afe2be6853b3f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:26:09,248 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. service=AccessControlService 2024-12-02T04:26:09,249 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:26:09,249 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,249 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:09,249 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,249 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,261 INFO [StoreOpener-87d741e5cfd316e6fc89dfc072993c57-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,263 INFO [StoreOpener-c55f93e6a1eafc37c98afe2be6853b3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,264 INFO [StoreOpener-87d741e5cfd316e6fc89dfc072993c57-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87d741e5cfd316e6fc89dfc072993c57 columnFamilyName cf 2024-12-02T04:26:09,265 DEBUG [StoreOpener-87d741e5cfd316e6fc89dfc072993c57-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:09,266 INFO [StoreOpener-87d741e5cfd316e6fc89dfc072993c57-1 {}] regionserver.HStore(327): Store=87d741e5cfd316e6fc89dfc072993c57/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:09,267 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,267 INFO [StoreOpener-c55f93e6a1eafc37c98afe2be6853b3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c55f93e6a1eafc37c98afe2be6853b3f columnFamilyName cf 2024-12-02T04:26:09,268 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,268 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,268 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,269 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,269 DEBUG [StoreOpener-c55f93e6a1eafc37c98afe2be6853b3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:09,270 INFO [StoreOpener-c55f93e6a1eafc37c98afe2be6853b3f-1 {}] regionserver.HStore(327): Store=c55f93e6a1eafc37c98afe2be6853b3f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:09,271 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,271 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,272 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,272 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,272 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 
c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,274 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,278 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,279 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:09,280 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened c55f93e6a1eafc37c98afe2be6853b3f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74320717, jitterRate=0.1074649840593338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:09,280 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,281 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for c55f93e6a1eafc37c98afe2be6853b3f: Running coprocessor pre-open hook at 1733113569249Writing region info on filesystem at 1733113569249Initializing all the Stores at 1733113569253 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113569253Cleaning up temporary data from old regions at 1733113569272 (+19 ms)Running coprocessor post-open hooks at 1733113569280 (+8 ms)Region opened successfully at 1733113569281 (+1 ms) 2024-12-02T04:26:09,282 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f., pid=136, masterSystemTime=1733113569241 2024-12-02T04:26:09,283 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:09,283 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
2024-12-02T04:26:09,283 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:09,284 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 87d741e5cfd316e6fc89dfc072993c57; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74752206, jitterRate=0.11389467120170593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:09,284 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,284 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 87d741e5cfd316e6fc89dfc072993c57: Running coprocessor pre-open hook at 1733113569245Writing region info on filesystem at 1733113569245Initializing all the Stores at 1733113569247 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113569247Cleaning up temporary data from old regions at 1733113569269 (+22 ms)Running coprocessor post-open hooks at 1733113569284 (+15 ms)Region opened successfully at 1733113569284 2024-12-02T04:26:09,284 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=c55f93e6a1eafc37c98afe2be6853b3f, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:09,285 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57., pid=135, masterSystemTime=1733113569238 2024-12-02T04:26:09,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:09,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:09,287 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 
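The region open journals above echo the column family schema the test table was created with (a single version, ROW bloom filter, 64 KB blocks, MOB enabled with threshold 0) and show two regions with boundaries ",," and ",1,". A minimal sketch of how an equivalent pre-split table could be declared through the public Admin API; the builder calls and the split key are illustrative reconstructions from the log, not the test's actual source:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTestTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' as echoed in the open journal: VERSIONS=1, BLOOMFILTER=ROW,
      // BLOCKSIZE=64KB, IS_MOB=true, MOB_THRESHOLD=0.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .setMobEnabled(true)
          .setMobThreshold(0)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(cf)
          .build();
      // One split key ("1") yields the two regions seen opening above: [,1) and [1,).
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```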
2024-12-02T04:26:09,289 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=87d741e5cfd316e6fc89dfc072993c57, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:09,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87d741e5cfd316e6fc89dfc072993c57, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:09,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-02T04:26:09,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f, server=e2eaa0f11f7e,41643,1733113342088 in 202 msec 2024-12-02T04:26:09,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, ASSIGN in 369 msec 2024-12-02T04:26:09,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-02T04:26:09,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 87d741e5cfd316e6fc89dfc072993c57, server=e2eaa0f11f7e,37413,1733113342217 in 209 msec 2024-12-02T04:26:09,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-02T04:26:09,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, ASSIGN in 370 msec 2024-12-02T04:26:09,297 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:26:09,298 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113569298"}]},"ts":"1733113569298"} 2024-12-02T04:26:09,300 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-02T04:26:09,301 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:26:09,301 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-02T04:26:09,304 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T04:26:09,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:09,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:09,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:09,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:09,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:09,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:09,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:09,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:09,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 491 msec 2024-12-02T04:26:09,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T04:26:09,497 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T04:26:09,498 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,500 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,501 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
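After the table is enabled, the master writes an RWXCA entry for the test user into hbase:acl, and ZKPermissionWatcher fans the change out to every server, which is the NodeChildrenChanged traffic on /hbase/acl seen above. A hedged sketch of issuing the same kind of grant from a client, assuming the AccessController coprocessor is active on the cluster; the user and table names are taken from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant the full RWXCA set on the whole table (null family/qualifier = all).
      // PermissionStorage persists the entry in hbase:acl and the ZK watcher
      // updates each region server's permission cache, as logged above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```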
2024-12-02T04:26:09,501 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:09,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,509 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,515 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T04:26:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113569518 (current time:1733113569518). 2024-12-02T04:26:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-02T04:26:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b383139, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:09,520 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:09,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:09,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:09,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c32110f, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:09,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:09,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,522 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:09,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@492f5fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:09,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:09,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:09,525 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54504, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:09,526 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,527 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e7429eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:09,529 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43a7f90e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,530 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:09,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@498844a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:09,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:09,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:09,533 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:09,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:09,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:09,536 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:09,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T04:26:09,538 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:09,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
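The request logged above ({ ss=emptySnaptb0-... type=FLUSH ttl=0 }) is what the master receives when a client asks for a FLUSH-type snapshot of the online table; it validates the description (creation time, TTL, version, owner, table ACLs) before registering the SnapshotProcedure that follows. A minimal client-side sketch using the public Admin API, assuming default configuration:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of an enabled table; the call blocks until the
      // master's SnapshotProcedure (pid=137 in the log) completes or fails.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}
```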
2024-12-02T04:26:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T04:26:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-02T04:26:09,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T04:26:09,541 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:09,542 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:09,545 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:09,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742179_1355 (size=215) 2024-12-02T04:26:09,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742179_1355 (size=215) 2024-12-02T04:26:09,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742179_1355 (size=215) 2024-12-02T04:26:09,562 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:09,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57}] 2024-12-02T04:26:09,563 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,563 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T04:26:09,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-02T04:26:09,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 87d741e5cfd316e6fc89dfc072993c57: 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for c55f93e6a1eafc37c98afe2be6853b3f: 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:09,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:09,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742181_1357 (size=86) 2024-12-02T04:26:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742180_1356 (size=86) 2024-12-02T04:26:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742181_1357 (size=86) 2024-12-02T04:26:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742180_1356 (size=86) 2024-12-02T04:26:09,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742180_1356 (size=86) 2024-12-02T04:26:09,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:09,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-02T04:26:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-02T04:26:09,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,737 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:09,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742181_1357 (size=86) 2024-12-02T04:26:09,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f in 178 msec 2024-12-02T04:26:09,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 
2024-12-02T04:26:09,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-02T04:26:09,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-02T04:26:09,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,748 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-12-02T04:26:09,754 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:09,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57 in 189 msec 2024-12-02T04:26:09,755 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:09,756 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:26:09,756 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:09,756 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:09,756 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:26:09,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742182_1358 (size=78) 2024-12-02T04:26:09,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742182_1358 (size=78) 2024-12-02T04:26:09,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742182_1358 (size=78) 2024-12-02T04:26:09,772 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:09,772 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,773 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742183_1359 (size=713) 2024-12-02T04:26:09,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742183_1359 (size=713) 2024-12-02T04:26:09,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742183_1359 (size=713) 2024-12-02T04:26:09,796 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:09,802 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:09,802 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,803 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:09,804 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-02T04:26:09,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 265 msec 2024-12-02T04:26:09,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T04:26:09,858 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T04:26:09,865 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:09,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:09,868 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:09,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
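The "writing data ... with WAL disabled. Data may be lost in the event of a crash." messages above are what HRegion prints when a client writes with durability SKIP_WAL, which the test presumably uses to load rows quickly between snapshots. A small sketch of such a write; the row key, qualifier, and value are made up for illustration:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the warning above: the edit lands only in the
      // memstore, so it is lost if the region server crashes before a flush.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```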
2024-12-02T04:26:09,872 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:09,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,878 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,883 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T04:26:09,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T04:26:09,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113569885 (current time:1733113569885). 2024-12-02T04:26:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-02T04:26:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d94df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:09,887 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:09,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:09,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:09,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@396b062, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:09,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:09,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,888 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48942, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:09,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ced7063, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:09,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:09,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:09,890 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54516, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:09,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:09,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:09,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,891 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:09,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@561256dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:09,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:09,893 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:09,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:09,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:09,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b5ab49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:09,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:09,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,894 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:09,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b52c813, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:09,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:09,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:09,897 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:09,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:09,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:09,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56146, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:09,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
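The handler above has just located the hbase:acl region for row 'testtb-testExportFileSystemStateWithMergeRegion'; the master is about to read that table's permissions so they can be copied into the snapshot description (the "Read acl: entry[...], kv [jenkins: RWXCA]" entry further down). A minimal client-side sketch of the same lookup, assuming the standard AccessControlClient API; the standalone class and configuration handling are illustrative and are not part of this test:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ReadAclSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Fetch the ACL entries stored in hbase:acl for the snapshotted table;
          // in the log above this table carries the single grant "jenkins: RWXCA".
          List<UserPermission> perms = AccessControlClient.getUserPermissions(
              conn, "testtb-testExportFileSystemStateWithMergeRegion");
          perms.forEach(System.out::println);
        }
      }
    }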
2024-12-02T04:26:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:09,900 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T04:26:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
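The snapshot request for snaptb0-testExportFileSystemStateWithMergeRegion has passed ACL collection and validation on the master, and a SnapshotProcedure is about to be stored for it. A hedged sketch of the client call that drives a flush snapshot like this one, using the standard Admin API; the standalone class is illustrative scaffolding, since the test's own harness code is not visible in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure finishes; the repeated
          // "Checking to see if procedure is done pid=140" entries below are that wait.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
        }
      }
    }

On an enabled table this defaults to a flush snapshot, which matches the type=FLUSH recorded for procedure 140 below.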
2024-12-02T04:26:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T04:26:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-02T04:26:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T04:26:09,903 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:09,904 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:09,906 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:09,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742184_1360 (size=210) 2024-12-02T04:26:09,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742184_1360 (size=210) 2024-12-02T04:26:09,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742184_1360 (size=210) 2024-12-02T04:26:09,912 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:09,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57}] 2024-12-02T04:26:09,913 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:09,913 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T04:26:10,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-02T04:26:10,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-02T04:26:10,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:10,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:10,066 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing c55f93e6a1eafc37c98afe2be6853b3f 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T04:26:10,066 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 87d741e5cfd316e6fc89dfc072993c57 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T04:26:10,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f is 71, key is 02cee47bb60f77537f561120be70c2e0/cf:q/1733113569864/Put/seqid=0 2024-12-02T04:26:10,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57 is 71, key is 15e67c1030b63c2e0b205be10a194e99/cf:q/1733113569867/Put/seqid=0 2024-12-02T04:26:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742186_1362 (size=8102) 2024-12-02T04:26:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742185_1361 (size=5172) 2024-12-02T04:26:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742185_1361 (size=5172) 2024-12-02T04:26:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742186_1362 (size=8102) 2024-12-02T04:26:10,090 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742186_1362 (size=8102) 2024-12-02T04:26:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742185_1361 (size=5172) 2024-12-02T04:26:10,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:10,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:10,094 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:10,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:10,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/.tmp/cf/03923bd289ad437fa3de9fceefdc8a21, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=87d741e5cfd316e6fc89dfc072993c57] 2024-12-02T04:26:10,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/.tmp/cf/dd2f4d65eb3b40d19cc2dc2a6368158a, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=c55f93e6a1eafc37c98afe2be6853b3f] 2024-12-02T04:26:10,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/.tmp/cf/03923bd289ad437fa3de9fceefdc8a21 is 224, key is 14da8433d9e666d26d91c8b60ffa7bd62/cf:q/1733113569867/Put/seqid=0 2024-12-02T04:26:10,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/.tmp/cf/dd2f4d65eb3b40d19cc2dc2a6368158a is 224, key is 0bbaadbe6f60fb73115f2ab4aad5e4633/cf:q/1733113569864/Put/seqid=0 2024-12-02T04:26:10,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742188_1364 (size=15499) 2024-12-02T04:26:10,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742188_1364 (size=15499) 2024-12-02T04:26:10,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742188_1364 (size=15499) 2024-12-02T04:26:10,103 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/.tmp/cf/03923bd289ad437fa3de9fceefdc8a21 2024-12-02T04:26:10,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/.tmp/cf/03923bd289ad437fa3de9fceefdc8a21 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf/03923bd289ad437fa3de9fceefdc8a21 2024-12-02T04:26:10,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf/03923bd289ad437fa3de9fceefdc8a21, entries=46, sequenceid=6, filesize=15.1 K 2024-12-02T04:26:10,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742187_1363 (size=6198) 2024-12-02T04:26:10,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742187_1363 (size=6198) 2024-12-02T04:26:10,114 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 87d741e5cfd316e6fc89dfc072993c57 in 49ms, sequenceid=6, compaction requested=false 2024-12-02T04:26:10,114 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742187_1363 (size=6198) 2024-12-02T04:26:10,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-02T04:26:10,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 87d741e5cfd316e6fc89dfc072993c57: 2024-12-02T04:26:10,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T04:26:10,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:10,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf/03923bd289ad437fa3de9fceefdc8a21] hfiles 2024-12-02T04:26:10,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf/03923bd289ad437fa3de9fceefdc8a21 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742189_1365 (size=125) 2024-12-02T04:26:10,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742189_1365 (size=125) 2024-12-02T04:26:10,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742189_1365 (size=125) 2024-12-02T04:26:10,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 
2024-12-02T04:26:10,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-02T04:26:10,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-02T04:26:10,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:10,121 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:10,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 87d741e5cfd316e6fc89dfc072993c57 in 210 msec 2024-12-02T04:26:10,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T04:26:10,513 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/.tmp/cf/dd2f4d65eb3b40d19cc2dc2a6368158a 2024-12-02T04:26:10,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/.tmp/cf/dd2f4d65eb3b40d19cc2dc2a6368158a as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf/dd2f4d65eb3b40d19cc2dc2a6368158a 2024-12-02T04:26:10,523 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf/dd2f4d65eb3b40d19cc2dc2a6368158a, entries=4, sequenceid=6, filesize=6.1 K 2024-12-02T04:26:10,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for c55f93e6a1eafc37c98afe2be6853b3f in 459ms, sequenceid=6, compaction requested=false 2024-12-02T04:26:10,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for c55f93e6a1eafc37c98afe2be6853b3f: 2024-12-02T04:26:10,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T04:26:10,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:10,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf/dd2f4d65eb3b40d19cc2dc2a6368158a] hfiles 2024-12-02T04:26:10,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf/dd2f4d65eb3b40d19cc2dc2a6368158a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T04:26:10,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742190_1366 (size=125) 2024-12-02T04:26:10,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742190_1366 (size=125) 2024-12-02T04:26:10,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742190_1366 (size=125) 2024-12-02T04:26:10,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
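Both region flushes above went through HMobStore and DefaultMobStoreFlusher and wrote MOB files under the mobdir tree in addition to the regular store files, so the 'cf' family of this table is evidently MOB-enabled. A sketch of how such a family could be declared with the standard descriptor builders; the threshold value is illustrative and is not visible in this log:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      // MOB-enabled family: qualifying cells are written to files under the mobdir
      // paths seen in the flush entries above, and the region keeps references to them.
      static ColumnFamilyDescriptor mobFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(0L) // illustrative; a low threshold pushes even small cells into MOB files
            .build();
      }
    }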
2024-12-02T04:26:10,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-02T04:26:10,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-02T04:26:10,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:10,531 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:10,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-12-02T04:26:10,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f in 619 msec 2024-12-02T04:26:10,533 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:10,533 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:10,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:26:10,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:10,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:10,535 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f] hfiles 2024-12-02T04:26:10,536 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:10,536 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:10,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742191_1367 (size=309) 2024-12-02T04:26:10,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742191_1367 (size=309) 2024-12-02T04:26:10,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742191_1367 (size=309) 2024-12-02T04:26:10,545 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:10,545 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,546 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742192_1368 (size=1023) 2024-12-02T04:26:10,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38235 is added to blk_1073742192_1368 (size=1023) 2024-12-02T04:26:10,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742192_1368 (size=1023) 2024-12-02T04:26:10,554 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:10,560 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:10,561 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:10,562 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:10,562 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-02T04:26:10,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 661 msec 2024-12-02T04:26:11,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T04:26:11,038 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T04:26:11,041 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49958, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:26:11,041 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T04:26:11,041 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54536, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-12-02T04:26:11,043 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:26:11,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:11,045 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:26:11,045 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:11,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-02T04:26:11,046 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:26:11,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T04:26:11,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742193_1369 (size=399) 2024-12-02T04:26:11,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742193_1369 (size=399) 2024-12-02T04:26:11,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742193_1369 (size=399) 2024-12-02T04:26:11,054 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 69058ef34891eda9d3b7f66795cafb0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:11,055 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => 9957ffe4e057bbfde8162a8da3843741, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:11,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742195_1371 (size=85) 2024-12-02T04:26:11,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742195_1371 (size=85) 2024-12-02T04:26:11,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742195_1371 (size=85) 2024-12-02T04:26:11,063 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:11,063 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 9957ffe4e057bbfde8162a8da3843741, disabling compactions & flushes 2024-12-02T04:26:11,063 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,063 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,063 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. after waiting 0 ms 2024-12-02T04:26:11,063 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,064 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 
2024-12-02T04:26:11,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9957ffe4e057bbfde8162a8da3843741: Waiting for close lock at 1733113571063Disabling compacts and flushes for region at 1733113571063Disabling writes for close at 1733113571063Writing region close event to WAL at 1733113571064 (+1 ms)Closed at 1733113571064 2024-12-02T04:26:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742194_1370 (size=85) 2024-12-02T04:26:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742194_1370 (size=85) 2024-12-02T04:26:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742194_1370 (size=85) 2024-12-02T04:26:11,065 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:11,065 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 69058ef34891eda9d3b7f66795cafb0c, disabling compactions & flushes 2024-12-02T04:26:11,065 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,065 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,065 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. after waiting 0 ms 2024-12-02T04:26:11,065 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,065 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 
2024-12-02T04:26:11,065 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 69058ef34891eda9d3b7f66795cafb0c: Waiting for close lock at 1733113571065Disabling compacts and flushes for region at 1733113571065Disabling writes for close at 1733113571065Writing region close event to WAL at 1733113571065Closed at 1733113571065 2024-12-02T04:26:11,066 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:26:11,066 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733113571066"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113571066"}]},"ts":"1733113571066"} 2024-12-02T04:26:11,066 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733113571066"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113571066"}]},"ts":"1733113571066"} 2024-12-02T04:26:11,068 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:26:11,069 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:26:11,069 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113571069"}]},"ts":"1733113571069"} 2024-12-02T04:26:11,071 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-02T04:26:11,071 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:26:11,072 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:26:11,072 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:26:11,072 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:26:11,072 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:26:11,072 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, ASSIGN}] 2024-12-02T04:26:11,073 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, ASSIGN 2024-12-02T04:26:11,073 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, ASSIGN 2024-12-02T04:26:11,074 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:26:11,074 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:26:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T04:26:11,224 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
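The create request logged at 04:26:11,043 specifies a single 'cf' family with VERSIONS => '1', and the procedure lays out two regions split at row key '2' (STARTKEY '' to '2', and '2' to ''). A hedged sketch of an equivalent client-side creation with the standard builders; again this is illustrative scaffolding rather than the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build();
          // One split key gives two regions ('', '2') and ('2', ''), matching the
          // TransitRegionStateProcedures pid=144/145 being assigned above.
          admin.createTable(td, new byte[][] { Bytes.toBytes("2") });
        }
      }
    }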
2024-12-02T04:26:11,225 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=69058ef34891eda9d3b7f66795cafb0c, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:11,225 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=9957ffe4e057bbfde8162a8da3843741, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:11,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, ASSIGN because future has completed 2024-12-02T04:26:11,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9957ffe4e057bbfde8162a8da3843741, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:11,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, ASSIGN because future has completed 2024-12-02T04:26:11,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69058ef34891eda9d3b7f66795cafb0c, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T04:26:11,381 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,381 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,381 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 9957ffe4e057bbfde8162a8da3843741, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741.', STARTKEY => '2', ENDKEY => ''} 2024-12-02T04:26:11,381 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 69058ef34891eda9d3b7f66795cafb0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c.', STARTKEY => '', ENDKEY => '2'} 2024-12-02T04:26:11,381 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 
service=AccessControlService 2024-12-02T04:26:11,381 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. service=AccessControlService 2024-12-02T04:26:11,381 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:26:11,381 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:26:11,381 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,381 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,382 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:11,382 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:11,382 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,382 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,382 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,382 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,383 INFO [StoreOpener-9957ffe4e057bbfde8162a8da3843741-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,383 INFO [StoreOpener-69058ef34891eda9d3b7f66795cafb0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,384 INFO [StoreOpener-9957ffe4e057bbfde8162a8da3843741-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9957ffe4e057bbfde8162a8da3843741 columnFamilyName cf 2024-12-02T04:26:11,384 INFO [StoreOpener-69058ef34891eda9d3b7f66795cafb0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69058ef34891eda9d3b7f66795cafb0c columnFamilyName cf 2024-12-02T04:26:11,384 DEBUG [StoreOpener-69058ef34891eda9d3b7f66795cafb0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:11,384 DEBUG [StoreOpener-9957ffe4e057bbfde8162a8da3843741-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:11,384 INFO [StoreOpener-9957ffe4e057bbfde8162a8da3843741-1 {}] regionserver.HStore(327): Store=9957ffe4e057bbfde8162a8da3843741/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:11,384 INFO [StoreOpener-69058ef34891eda9d3b7f66795cafb0c-1 {}] regionserver.HStore(327): Store=69058ef34891eda9d3b7f66795cafb0c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:11,384 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,384 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,385 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,385 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,385 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,385 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,385 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,385 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,386 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,386 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,387 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,388 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,388 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:11,388 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 9957ffe4e057bbfde8162a8da3843741; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68312644, jitterRate=0.01793771982192993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:11,388 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,389 DEBUG 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 9957ffe4e057bbfde8162a8da3843741: Running coprocessor pre-open hook at 1733113571382Writing region info on filesystem at 1733113571382Initializing all the Stores at 1733113571382Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113571382Cleaning up temporary data from old regions at 1733113571386 (+4 ms)Running coprocessor post-open hooks at 1733113571388 (+2 ms)Region opened successfully at 1733113571389 (+1 ms) 2024-12-02T04:26:11,389 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:11,390 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741., pid=146, masterSystemTime=1733113571378 2024-12-02T04:26:11,390 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 69058ef34891eda9d3b7f66795cafb0c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61348675, jitterRate=-0.08583350479602814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:11,390 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,390 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 69058ef34891eda9d3b7f66795cafb0c: Running coprocessor pre-open hook at 1733113571382Writing region info on filesystem at 1733113571382Initializing all the Stores at 1733113571382Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113571382Cleaning up temporary data from old regions at 1733113571386 (+4 ms)Running coprocessor post-open hooks at 1733113571390 (+4 ms)Region opened successfully at 1733113571390 2024-12-02T04:26:11,390 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c., pid=147, masterSystemTime=1733113571378 2024-12-02T04:26:11,391 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,391 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,392 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=9957ffe4e057bbfde8162a8da3843741, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:11,393 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=69058ef34891eda9d3b7f66795cafb0c, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:11,393 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,393 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9957ffe4e057bbfde8162a8da3843741, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:11,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69058ef34891eda9d3b7f66795cafb0c, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:11,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-02T04:26:11,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 9957ffe4e057bbfde8162a8da3843741, server=e2eaa0f11f7e,41643,1733113342088 in 167 msec 2024-12-02T04:26:11,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-02T04:26:11,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, ASSIGN in 324 msec 2024-12-02T04:26:11,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 69058ef34891eda9d3b7f66795cafb0c, server=e2eaa0f11f7e,37413,1733113342217 in 168 msec 2024-12-02T04:26:11,398 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-12-02T04:26:11,398 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, ASSIGN in 325 msec 2024-12-02T04:26:11,398 
INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:26:11,399 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113571398"}]},"ts":"1733113571398"} 2024-12-02T04:26:11,400 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-02T04:26:11,400 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:26:11,401 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-02T04:26:11,403 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-02T04:26:11,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:11,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:11,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:11,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,457 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,457 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:11,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 413 msec 2024-12-02T04:26:11,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:11,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T04:26:11,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:11,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-02T04:26:11,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-02T04:26:11,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T04:26:11,678 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T04:26:11,681 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is 
[region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c., hostname=e2eaa0f11f7e,37413,1733113342217, seqNum=2] 2024-12-02T04:26:11,685 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:11,687 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-02T04:26:11,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [69058ef34891eda9d3b7f66795cafb0c, 9957ffe4e057bbfde8162a8da3843741] 2024-12-02T04:26:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[69058ef34891eda9d3b7f66795cafb0c, 9957ffe4e057bbfde8162a8da3843741], force=true 2024-12-02T04:26:11,705 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[69058ef34891eda9d3b7f66795cafb0c, 9957ffe4e057bbfde8162a8da3843741], force=true 2024-12-02T04:26:11,705 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[69058ef34891eda9d3b7f66795cafb0c, 9957ffe4e057bbfde8162a8da3843741], force=true 2024-12-02T04:26:11,705 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[69058ef34891eda9d3b7f66795cafb0c, 9957ffe4e057bbfde8162a8da3843741], force=true 2024-12-02T04:26:11,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T04:26:11,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, UNASSIGN}] 2024-12-02T04:26:11,713 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, UNASSIGN 2024-12-02T04:26:11,713 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): 
Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, UNASSIGN 2024-12-02T04:26:11,714 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=69058ef34891eda9d3b7f66795cafb0c, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:11,714 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=9957ffe4e057bbfde8162a8da3843741, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:11,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, UNASSIGN because future has completed 2024-12-02T04:26:11,715 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:11,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69058ef34891eda9d3b7f66795cafb0c, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:11,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, UNASSIGN because future has completed 2024-12-02T04:26:11,716 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:11,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9957ffe4e057bbfde8162a8da3843741, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:11,771 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000003/launch_container.sh] 2024-12-02T04:26:11,771 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000003/container_tokens] 2024-12-02T04:26:11,771 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000003/sysfs] 
2024-12-02T04:26:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T04:26:11,868 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,868 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T04:26:11,868 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 69058ef34891eda9d3b7f66795cafb0c, disabling compactions & flushes 2024-12-02T04:26:11,868 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,868 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,868 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. after waiting 0 ms 2024-12-02T04:26:11,868 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,868 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 69058ef34891eda9d3b7f66795cafb0c 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-02T04:26:11,869 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,869 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T04:26:11,869 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 9957ffe4e057bbfde8162a8da3843741, disabling compactions & flushes 2024-12-02T04:26:11,869 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,869 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 
2024-12-02T04:26:11,869 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. after waiting 0 ms 2024-12-02T04:26:11,869 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 2024-12-02T04:26:11,869 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 9957ffe4e057bbfde8162a8da3843741 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-02T04:26:11,886 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/.tmp/cf/960e4b8574f8415fb45611b7599bcfd1 is 28, key is 1/cf:/1733113571682/Put/seqid=0 2024-12-02T04:26:11,886 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/.tmp/cf/9f7bd8404e4c4c019b6285b690c5478a is 28, key is 2/cf:/1733113571685/Put/seqid=0 2024-12-02T04:26:11,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742197_1373 (size=4945) 2024-12-02T04:26:11,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742197_1373 (size=4945) 2024-12-02T04:26:11,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742197_1373 (size=4945) 2024-12-02T04:26:11,892 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/.tmp/cf/9f7bd8404e4c4c019b6285b690c5478a 2024-12-02T04:26:11,896 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/.tmp/cf/9f7bd8404e4c4c019b6285b690c5478a as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf/9f7bd8404e4c4c019b6285b690c5478a 2024-12-02T04:26:11,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742196_1372 (size=4945) 2024-12-02T04:26:11,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40371 is added to blk_1073742196_1372 (size=4945) 2024-12-02T04:26:11,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742196_1372 (size=4945) 2024-12-02T04:26:11,901 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/.tmp/cf/960e4b8574f8415fb45611b7599bcfd1 2024-12-02T04:26:11,901 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf/9f7bd8404e4c4c019b6285b690c5478a, entries=1, sequenceid=5, filesize=4.8 K 2024-12-02T04:26:11,902 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 9957ffe4e057bbfde8162a8da3843741 in 33ms, sequenceid=5, compaction requested=false 2024-12-02T04:26:11,902 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-02T04:26:11,906 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/.tmp/cf/960e4b8574f8415fb45611b7599bcfd1 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf/960e4b8574f8415fb45611b7599bcfd1 2024-12-02T04:26:11,907 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T04:26:11,908 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:11,908 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. 
2024-12-02T04:26:11,908 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 9957ffe4e057bbfde8162a8da3843741: Waiting for close lock at 1733113571869Running coprocessor pre-close hooks at 1733113571869Disabling compacts and flushes for region at 1733113571869Disabling writes for close at 1733113571869Obtaining lock to block concurrent updates at 1733113571869Preparing flush snapshotting stores in 9957ffe4e057bbfde8162a8da3843741 at 1733113571869Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733113571870 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741. at 1733113571870Flushing 9957ffe4e057bbfde8162a8da3843741/cf: creating writer at 1733113571870Flushing 9957ffe4e057bbfde8162a8da3843741/cf: appending metadata at 1733113571886 (+16 ms)Flushing 9957ffe4e057bbfde8162a8da3843741/cf: closing flushed file at 1733113571886Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51073d31: reopening flushed file at 1733113571896 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 9957ffe4e057bbfde8162a8da3843741 in 33ms, sequenceid=5, compaction requested=false at 1733113571902 (+6 ms)Writing region close event to WAL at 1733113571903 (+1 ms)Running coprocessor post-close hooks at 1733113571908 (+5 ms)Closed at 1733113571908 2024-12-02T04:26:11,909 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:11,910 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=9957ffe4e057bbfde8162a8da3843741, regionState=CLOSED 2024-12-02T04:26:11,910 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf/960e4b8574f8415fb45611b7599bcfd1, entries=1, sequenceid=5, filesize=4.8 K 2024-12-02T04:26:11,911 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 69058ef34891eda9d3b7f66795cafb0c in 43ms, sequenceid=5, compaction requested=false 2024-12-02T04:26:11,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9957ffe4e057bbfde8162a8da3843741, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:11,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-02T04:26:11,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 9957ffe4e057bbfde8162a8da3843741, server=e2eaa0f11f7e,41643,1733113342088 in 197 msec 2024-12-02T04:26:11,915 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T04:26:11,915 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:11,915 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. 2024-12-02T04:26:11,915 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 69058ef34891eda9d3b7f66795cafb0c: Waiting for close lock at 1733113571868Running coprocessor pre-close hooks at 1733113571868Disabling compacts and flushes for region at 1733113571868Disabling writes for close at 1733113571868Obtaining lock to block concurrent updates at 1733113571868Preparing flush snapshotting stores in 69058ef34891eda9d3b7f66795cafb0c at 1733113571868Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733113571869 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c. at 1733113571869Flushing 69058ef34891eda9d3b7f66795cafb0c/cf: creating writer at 1733113571870 (+1 ms)Flushing 69058ef34891eda9d3b7f66795cafb0c/cf: appending metadata at 1733113571886 (+16 ms)Flushing 69058ef34891eda9d3b7f66795cafb0c/cf: closing flushed file at 1733113571886Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@447acc45: reopening flushed file at 1733113571905 (+19 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 69058ef34891eda9d3b7f66795cafb0c in 43ms, sequenceid=5, compaction requested=false at 1733113571911 (+6 ms)Writing region close event to WAL at 1733113571912 (+1 ms)Running coprocessor post-close hooks at 1733113571915 (+3 ms)Closed at 1733113571915 2024-12-02T04:26:11,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9957ffe4e057bbfde8162a8da3843741, UNASSIGN in 202 msec 2024-12-02T04:26:11,916 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:11,917 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=69058ef34891eda9d3b7f66795cafb0c, regionState=CLOSED 2024-12-02T04:26:11,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69058ef34891eda9d3b7f66795cafb0c, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:11,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-02T04:26:11,920 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 69058ef34891eda9d3b7f66795cafb0c, server=e2eaa0f11f7e,37413,1733113342217 in 204 msec 2024-12-02T04:26:11,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-02T04:26:11,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=69058ef34891eda9d3b7f66795cafb0c, UNASSIGN in 208 msec 2024-12-02T04:26:11,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742198_1374 (size=84) 2024-12-02T04:26:11,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742198_1374 (size=84) 2024-12-02T04:26:11,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742198_1374 (size=84) 2024-12-02T04:26:11,936 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:11,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742199_1375 (size=20) 2024-12-02T04:26:11,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742199_1375 (size=20) 2024-12-02T04:26:11,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742199_1375 (size=20) 2024-12-02T04:26:11,992 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000005/launch_container.sh] 2024-12-02T04:26:11,992 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000005/container_tokens] 2024-12-02T04:26:11,992 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000005/sysfs] 2024-12-02T04:26:12,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T04:26:12,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T04:26:12,347 DEBUG [PEWorker-4 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:12,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742200_1376 (size=21) 2024-12-02T04:26:12,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742200_1376 (size=21) 2024-12-02T04:26:12,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742200_1376 (size=21) 2024-12-02T04:26:12,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742201_1377 (size=84) 2024-12-02T04:26:12,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742201_1377 (size=84) 2024-12-02T04:26:12,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742201_1377 (size=84) 2024-12-02T04:26:12,358 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:12,367 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-02T04:26:12,369 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571042.69058ef34891eda9d3b7f66795cafb0c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:12,369 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733113571042.9957ffe4e057bbfde8162a8da3843741.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:12,369 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:12,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, ASSIGN}] 2024-12-02T04:26:12,374 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, ASSIGN 2024-12-02T04:26:12,374 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, ASSIGN; state=MERGED, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:26:12,525 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T04:26:12,525 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=8cda44f7fdac1fc2fef17d51390bffd4, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:12,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, ASSIGN because future has completed 2024-12-02T04:26:12,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:12,681 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:12,681 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 8cda44f7fdac1fc2fef17d51390bffd4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.', STARTKEY => '', ENDKEY => ''} 2024-12-02T04:26:12,681 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. service=AccessControlService 2024-12-02T04:26:12,682 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:26:12,682 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,682 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:12,682 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,682 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,683 INFO [StoreOpener-8cda44f7fdac1fc2fef17d51390bffd4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,684 INFO [StoreOpener-8cda44f7fdac1fc2fef17d51390bffd4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8cda44f7fdac1fc2fef17d51390bffd4 columnFamilyName cf 2024-12-02T04:26:12,684 DEBUG [StoreOpener-8cda44f7fdac1fc2fef17d51390bffd4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:12,692 DEBUG [StoreOpener-8cda44f7fdac1fc2fef17d51390bffd4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/960e4b8574f8415fb45611b7599bcfd1.69058ef34891eda9d3b7f66795cafb0c->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf/960e4b8574f8415fb45611b7599bcfd1-top 2024-12-02T04:26:12,697 DEBUG [StoreOpener-8cda44f7fdac1fc2fef17d51390bffd4-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/9f7bd8404e4c4c019b6285b690c5478a.9957ffe4e057bbfde8162a8da3843741->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf/9f7bd8404e4c4c019b6285b690c5478a-top 2024-12-02T04:26:12,698 INFO [StoreOpener-8cda44f7fdac1fc2fef17d51390bffd4-1 {}] regionserver.HStore(327): Store=8cda44f7fdac1fc2fef17d51390bffd4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:12,698 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,698 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,699 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,700 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,700 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,701 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,702 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 8cda44f7fdac1fc2fef17d51390bffd4; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59039130, jitterRate=-0.12024840712547302}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:12,702 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,702 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 8cda44f7fdac1fc2fef17d51390bffd4: Running coprocessor pre-open hook at 1733113572682Writing region info on filesystem at 1733113572682Initializing all the Stores at 1733113572683 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113572683Cleaning up temporary data from old regions at 1733113572700 (+17 ms)Running coprocessor post-open hooks at 1733113572702 (+2 ms)Region opened successfully at 1733113572702 2024-12-02T04:26:12,703 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4., pid=154, masterSystemTime=1733113572678 2024-12-02T04:26:12,703 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.,because compaction is disabled. 2024-12-02T04:26:12,704 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:12,704 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:12,705 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=8cda44f7fdac1fc2fef17d51390bffd4, regionState=OPEN, openSeqNum=9, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:12,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:12,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-02T04:26:12,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4, server=e2eaa0f11f7e,37413,1733113342217 in 180 msec 2024-12-02T04:26:12,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-02T04:26:12,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, ASSIGN in 336 msec 2024-12-02T04:26:12,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[69058ef34891eda9d3b7f66795cafb0c, 9957ffe4e057bbfde8162a8da3843741], force=true in 1.0100 sec 2024-12-02T04:26:12,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T04:26:12,848 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T04:26:12,849 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113572849 (current time:1733113572849). 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3429fa9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:12,850 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:12,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:12,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:12,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63b2b93e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:12,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:12,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:12,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:12,852 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:12,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30574966, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:12,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:12,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:12,855 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33076, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:12,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:12,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:12,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:12,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:12,856 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
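The MasterRpcServices(1763) entry above, "snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 ... type=FLUSH ttl=0 }", is the server side of an Admin#snapshot call; the repeated "Checking to see if procedure is done pid=155" entries further down are the master answering the client's polling while SnapshotProcedure 155 runs. A minimal, illustrative client sketch that issues the same kind of FLUSH snapshot (connection settings are placeholders, not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request logged above.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH));
    }
  }
}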
2024-12-02T04:26:12,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@331fe0e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:12,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:12,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:12,858 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:12,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:12,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:12,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41cdb5b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:12,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:12,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:12,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:12,860 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45176, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:12,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a4b569f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:12,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:12,862 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:12,862 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:12,863 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T04:26:12,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:12,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:12,866 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:12,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:12,868 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:12,868 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-02T04:26:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:26:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-02T04:26:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-02T04:26:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T04:26:12,871 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:12,872 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:12,875 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:12,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742202_1378 (size=216) 2024-12-02T04:26:12,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742202_1378 (size=216) 2024-12-02T04:26:12,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742202_1378 (size=216) 2024-12-02T04:26:12,892 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:12,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4}] 2024-12-02T04:26:12,893 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:12,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T04:26:13,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-02T04:26:13,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:13,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 8cda44f7fdac1fc2fef17d51390bffd4: 2024-12-02T04:26:13,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-02T04:26:13,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:13,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/960e4b8574f8415fb45611b7599bcfd1.69058ef34891eda9d3b7f66795cafb0c->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf/960e4b8574f8415fb45611b7599bcfd1-top, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/9f7bd8404e4c4c019b6285b690c5478a.9957ffe4e057bbfde8162a8da3843741->hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf/9f7bd8404e4c4c019b6285b690c5478a-top] hfiles 2024-12-02T04:26:13,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/960e4b8574f8415fb45611b7599bcfd1.69058ef34891eda9d3b7f66795cafb0c for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/9f7bd8404e4c4c019b6285b690c5478a.9957ffe4e057bbfde8162a8da3843741 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742203_1379 (size=269) 2024-12-02T04:26:13,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742203_1379 (size=269) 2024-12-02T04:26:13,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742203_1379 (size=269) 2024-12-02T04:26:13,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 
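After the region-level SnapshotRegionProcedure above completes and the procedure below moves the working directory from .hbase-snapshot/.tmp to .hbase-snapshot, the snapshot becomes visible to clients. As a small, illustrative check (not part of the test itself, connection settings are placeholders), completed snapshots can be listed through the Admin API:

import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // List completed snapshots whose names match the test's naming scheme.
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("snaptb0-.*"))) {
        System.out.println(sd.getName() + " on table " + sd.getTableName());
      }
    }
  }
}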
2024-12-02T04:26:13,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-02T04:26:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-02T04:26:13,054 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:13,055 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:13,064 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-02T04:26:13,064 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4 in 164 msec 2024-12-02T04:26:13,064 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:13,065 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:13,066 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:13,066 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,067 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,067 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0006_000001 (auth:SIMPLE) from 127.0.0.1:49516 2024-12-02T04:26:13,088 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000001/launch_container.sh] 2024-12-02T04:26:13,088 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000001/container_tokens] 2024-12-02T04:26:13,088 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0006/container_1733113348655_0006_01_000001/sysfs] 2024-12-02T04:26:13,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742204_1380 (size=670) 2024-12-02T04:26:13,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742204_1380 (size=670) 2024-12-02T04:26:13,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742204_1380 (size=670) 2024-12-02T04:26:13,106 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:13,113 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:13,113 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,115 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:13,115 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-02T04:26:13,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 246 msec 2024-12-02T04:26:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T04:26:13,188 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T04:26:13,188 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188 2024-12-02T04:26:13,188 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:13,222 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:13,222 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,224 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
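The TestExportSnapshot(515/542) and ExportSnapshot(1094/1095) entries above show the export tool being configured with the test data directory as source and export-1733113573188 as destination. A stand-alone sketch of an equivalent invocation via ToolRunner follows; the destination URI and mapper count here are placeholders, not values from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the command-line form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 \
    //     -copy-to hdfs://namenode:8020/backup -mappers 2
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://namenode:8020/backup",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}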
2024-12-02T04:26:13,229 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742205_1381 (size=216) 2024-12-02T04:26:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742205_1381 (size=216) 2024-12-02T04:26:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742205_1381 (size=216) 2024-12-02T04:26:13,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742206_1382 (size=670) 2024-12-02T04:26:13,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742206_1382 (size=670) 2024-12-02T04:26:13,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742206_1382 (size=670) 2024-12-02T04:26:13,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:13,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:13,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-18098280168190439839.jar 2024-12-02T04:26:14,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-17452035683710190179.jar 
2024-12-02T04:26:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:26:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:26:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:26:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:26:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:26:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:26:14,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:26:14,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:26:14,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:26:14,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:26:14,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:26:14,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:14,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:14,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:14,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:14,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:14,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:14,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:14,196 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:26:14,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742207_1383 (size=131440) 2024-12-02T04:26:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742207_1383 (size=131440) 2024-12-02T04:26:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742207_1383 (size=131440) 2024-12-02T04:26:14,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742208_1384 (size=4188619) 2024-12-02T04:26:14,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742208_1384 (size=4188619) 2024-12-02T04:26:14,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742208_1384 (size=4188619) 2024-12-02T04:26:14,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742209_1385 (size=1323991) 2024-12-02T04:26:14,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742209_1385 (size=1323991) 2024-12-02T04:26:14,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742209_1385 (size=1323991) 2024-12-02T04:26:14,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742210_1386 (size=903927) 2024-12-02T04:26:14,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742210_1386 (size=903927) 2024-12-02T04:26:14,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742210_1386 (size=903927) 2024-12-02T04:26:14,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742211_1387 (size=8360083) 2024-12-02T04:26:14,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742211_1387 (size=8360083) 2024-12-02T04:26:14,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742211_1387 (size=8360083) 2024-12-02T04:26:14,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742212_1388 (size=1877034) 2024-12-02T04:26:14,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742212_1388 (size=1877034) 2024-12-02T04:26:14,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742212_1388 (size=1877034) 2024-12-02T04:26:14,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742213_1389 (size=77835) 2024-12-02T04:26:14,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742213_1389 (size=77835) 2024-12-02T04:26:14,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742213_1389 (size=77835) 2024-12-02T04:26:14,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742214_1390 (size=30949) 2024-12-02T04:26:14,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742214_1390 (size=30949) 2024-12-02T04:26:14,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742214_1390 (size=30949) 2024-12-02T04:26:14,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742215_1391 (size=1597213) 2024-12-02T04:26:14,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742215_1391 (size=1597213) 2024-12-02T04:26:14,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742215_1391 (size=1597213) 2024-12-02T04:26:14,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742216_1392 (size=4695811) 2024-12-02T04:26:14,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742216_1392 (size=4695811) 2024-12-02T04:26:14,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742216_1392 (size=4695811) 2024-12-02T04:26:14,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742217_1393 (size=6424732) 2024-12-02T04:26:14,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742217_1393 (size=6424732) 2024-12-02T04:26:14,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742217_1393 (size=6424732) 2024-12-02T04:26:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742218_1394 (size=232957) 2024-12-02T04:26:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742218_1394 (size=232957) 2024-12-02T04:26:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742218_1394 (size=232957) 2024-12-02T04:26:14,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742219_1395 (size=127628) 2024-12-02T04:26:14,393 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742219_1395 (size=127628) 2024-12-02T04:26:14,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742219_1395 (size=127628) 2024-12-02T04:26:14,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742220_1396 (size=20406) 2024-12-02T04:26:14,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742220_1396 (size=20406) 2024-12-02T04:26:14,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742220_1396 (size=20406) 2024-12-02T04:26:14,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742221_1397 (size=5175431) 2024-12-02T04:26:14,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742221_1397 (size=5175431) 2024-12-02T04:26:14,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742221_1397 (size=5175431) 2024-12-02T04:26:14,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742222_1398 (size=217634) 2024-12-02T04:26:14,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742222_1398 (size=217634) 2024-12-02T04:26:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742222_1398 (size=217634) 2024-12-02T04:26:14,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742223_1399 (size=440957) 2024-12-02T04:26:14,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742223_1399 (size=440957) 2024-12-02T04:26:14,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742223_1399 (size=440957) 2024-12-02T04:26:14,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742224_1400 (size=1832290) 2024-12-02T04:26:14,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742224_1400 (size=1832290) 2024-12-02T04:26:14,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742224_1400 (size=1832290) 2024-12-02T04:26:14,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742225_1401 (size=322274) 2024-12-02T04:26:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742225_1401 (size=322274) 2024-12-02T04:26:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742225_1401 (size=322274) 2024-12-02T04:26:14,528 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742226_1402 (size=503880) 2024-12-02T04:26:14,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742226_1402 (size=503880) 2024-12-02T04:26:14,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742226_1402 (size=503880) 2024-12-02T04:26:14,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742227_1403 (size=29229) 2024-12-02T04:26:14,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742227_1403 (size=29229) 2024-12-02T04:26:14,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742227_1403 (size=29229) 2024-12-02T04:26:14,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742228_1404 (size=24096) 2024-12-02T04:26:14,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742228_1404 (size=24096) 2024-12-02T04:26:14,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742228_1404 (size=24096) 2024-12-02T04:26:14,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742229_1405 (size=111872) 2024-12-02T04:26:14,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742229_1405 (size=111872) 2024-12-02T04:26:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742229_1405 (size=111872) 2024-12-02T04:26:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742230_1406 (size=45609) 2024-12-02T04:26:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742230_1406 (size=45609) 2024-12-02T04:26:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742230_1406 (size=45609) 2024-12-02T04:26:14,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742231_1407 (size=136454) 2024-12-02T04:26:14,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742231_1407 (size=136454) 2024-12-02T04:26:14,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742231_1407 (size=136454) 2024-12-02T04:26:14,579 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-02T04:26:14,581 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-02T04:26:14,583 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-02T04:26:14,583 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-02T04:26:14,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742232_1408 (size=481) 2024-12-02T04:26:14,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742232_1408 (size=481) 2024-12-02T04:26:14,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742232_1408 (size=481) 2024-12-02T04:26:14,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742233_1409 (size=21) 2024-12-02T04:26:14,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742233_1409 (size=21) 2024-12-02T04:26:14,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742233_1409 (size=21) 2024-12-02T04:26:14,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742234_1410 (size=304057) 2024-12-02T04:26:14,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742234_1410 (size=304057) 2024-12-02T04:26:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742234_1410 (size=304057) 2024-12-02T04:26:14,637 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:26:14,638 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:26:14,879 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0007_000001 (auth:SIMPLE) from 127.0.0.1:39070 2024-12-02T04:26:20,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0007_000001 (auth:SIMPLE) from 127.0.0.1:37076 2024-12-02T04:26:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742235_1411 (size=349755) 2024-12-02T04:26:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742235_1411 (size=349755) 2024-12-02T04:26:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742235_1411 (size=349755) 2024-12-02T04:26:20,563 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:26:22,428 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0007_000001 (auth:SIMPLE) from 127.0.0.1:53360 2024-12-02T04:26:22,428 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0007_000001 (auth:SIMPLE) from 127.0.0.1:51416 2024-12-02T04:26:23,900 WARN [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 0 2024-12-02T04:26:24,927 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 87d741e5cfd316e6fc89dfc072993c57 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:26:24,927 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c55f93e6a1eafc37c98afe2be6853b3f changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:26:26,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742236_1412 (size=4945) 2024-12-02T04:26:26,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742236_1412 (size=4945) 2024-12-02T04:26:26,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742236_1412 (size=4945) 2024-12-02T04:26:26,447 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000002/launch_container.sh] 2024-12-02T04:26:26,447 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000002/container_tokens] 
2024-12-02T04:26:26,447 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000002/sysfs] 2024-12-02T04:26:27,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742238_1414 (size=4945) 2024-12-02T04:26:27,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742238_1414 (size=4945) 2024-12-02T04:26:27,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742238_1414 (size=4945) 2024-12-02T04:26:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742237_1413 (size=22246) 2024-12-02T04:26:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742237_1413 (size=22246) 2024-12-02T04:26:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742237_1413 (size=22246) 2024-12-02T04:26:27,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742239_1415 (size=482) 2024-12-02T04:26:27,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742239_1415 (size=482) 2024-12-02T04:26:27,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742239_1415 (size=482) 2024-12-02T04:26:27,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742240_1416 (size=22246) 2024-12-02T04:26:27,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742240_1416 (size=22246) 2024-12-02T04:26:27,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742240_1416 (size=22246) 2024-12-02T04:26:27,684 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000003/launch_container.sh] 2024-12-02T04:26:27,684 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000003/container_tokens] 2024-12-02T04:26:27,684 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000003/sysfs] 2024-12-02T04:26:27,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742241_1417 (size=349755) 2024-12-02T04:26:27,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742241_1417 (size=349755) 2024-12-02T04:26:27,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742241_1417 (size=349755) 2024-12-02T04:26:27,760 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0007_000001 (auth:SIMPLE) from 127.0.0.1:51420 2024-12-02T04:26:29,805 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:26:29,805 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T04:26:29,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:29,821 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:26:29,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:26:29,822 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:29,823 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-02T04:26:29,823 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-02T04:26:29,823 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:29,823 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-02T04:26:29,823 DEBUG [Time-limited test {}] 
snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113573188/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-02T04:26:29,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:29,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T04:26:29,847 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113589846"}]},"ts":"1733113589846"} 2024-12-02T04:26:29,850 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-02T04:26:29,850 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-02T04:26:29,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-02T04:26:29,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, UNASSIGN}] 2024-12-02T04:26:29,858 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, UNASSIGN 2024-12-02T04:26:29,864 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=8cda44f7fdac1fc2fef17d51390bffd4, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:29,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, UNASSIGN because future has completed 2024-12-02T04:26:29,867 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:29,867 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:29,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=157 2024-12-02T04:26:30,022 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:30,022 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:30,022 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 8cda44f7fdac1fc2fef17d51390bffd4, disabling compactions & flushes 2024-12-02T04:26:30,022 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:30,022 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:30,022 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. after waiting 0 ms 2024-12-02T04:26:30,022 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 2024-12-02T04:26:30,044 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-02T04:26:30,045 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:30,045 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4. 
2024-12-02T04:26:30,045 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 8cda44f7fdac1fc2fef17d51390bffd4: Waiting for close lock at 1733113590022Running coprocessor pre-close hooks at 1733113590022Disabling compacts and flushes for region at 1733113590022Disabling writes for close at 1733113590022Writing region close event to WAL at 1733113590038 (+16 ms)Running coprocessor post-close hooks at 1733113590045 (+7 ms)Closed at 1733113590045 2024-12-02T04:26:30,048 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:30,048 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=8cda44f7fdac1fc2fef17d51390bffd4, regionState=CLOSED 2024-12-02T04:26:30,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:30,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-02T04:26:30,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 8cda44f7fdac1fc2fef17d51390bffd4, server=e2eaa0f11f7e,37413,1733113342217 in 187 msec 2024-12-02T04:26:30,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-02T04:26:30,060 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8cda44f7fdac1fc2fef17d51390bffd4, UNASSIGN in 203 msec 2024-12-02T04:26:30,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-02T04:26:30,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 208 msec 2024-12-02T04:26:30,068 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113590067"}]},"ts":"1733113590067"} 2024-12-02T04:26:30,070 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-02T04:26:30,070 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-02T04:26:30,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 239 msec 2024-12-02T04:26:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T04:26:30,158 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T04:26:30,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,164 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,164 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:30,165 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:30,165 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:30,166 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/recovered.edits] 2024-12-02T04:26:30,166 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/recovered.edits] 2024-12-02T04:26:30,166 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/recovered.edits] 2024-12-02T04:26:30,177 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/960e4b8574f8415fb45611b7599bcfd1.69058ef34891eda9d3b7f66795cafb0c to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/960e4b8574f8415fb45611b7599bcfd1.69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:30,177 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf/9f7bd8404e4c4c019b6285b690c5478a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/cf/9f7bd8404e4c4c019b6285b690c5478a 2024-12-02T04:26:30,177 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf/960e4b8574f8415fb45611b7599bcfd1 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/cf/960e4b8574f8415fb45611b7599bcfd1 2024-12-02T04:26:30,179 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/9f7bd8404e4c4c019b6285b690c5478a.9957ffe4e057bbfde8162a8da3843741 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/cf/9f7bd8404e4c4c019b6285b690c5478a.9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:30,180 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/recovered.edits/8.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c/recovered.edits/8.seqid 2024-12-02T04:26:30,180 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/recovered.edits/8.seqid to 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741/recovered.edits/8.seqid 2024-12-02T04:26:30,181 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9957ffe4e057bbfde8162a8da3843741 2024-12-02T04:26:30,181 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/69058ef34891eda9d3b7f66795cafb0c 2024-12-02T04:26:30,183 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/recovered.edits/12.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4/recovered.edits/12.seqid 2024-12-02T04:26:30,184 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8cda44f7fdac1fc2fef17d51390bffd4 2024-12-02T04:26:30,184 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-02T04:26:30,186 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,189 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-02T04:26:30,192 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-02T04:26:30,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
2024-12-02T04:26:30,194 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113590194"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:30,197 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-02T04:26:30,197 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8cda44f7fdac1fc2fef17d51390bffd4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T04:26:30,197 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-02T04:26:30,198 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113590197"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:30,202 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-02T04:26:30,203 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 45 msec 2024-12-02T04:26:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,316 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T04:26:30,316 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T04:26:30,316 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,493 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-02T04:26:30,493 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T04:26:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-02T04:26:30,495 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,495 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T04:26:30,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,495 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:30,496 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:30,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:30,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T04:26:30,500 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113590500"}]},"ts":"1733113590500"} 2024-12-02T04:26:30,502 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-02T04:26:30,502 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-02T04:26:30,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-02T04:26:30,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, UNASSIGN}] 2024-12-02T04:26:30,506 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, UNASSIGN 2024-12-02T04:26:30,506 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, UNASSIGN 2024-12-02T04:26:30,507 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=87d741e5cfd316e6fc89dfc072993c57, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:30,507 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): 
pid=164 updating hbase:meta row=c55f93e6a1eafc37c98afe2be6853b3f, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:30,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, UNASSIGN because future has completed 2024-12-02T04:26:30,509 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:30,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:30,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, UNASSIGN because future has completed 2024-12-02T04:26:30,515 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:30,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87d741e5cfd316e6fc89dfc072993c57, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T04:26:30,667 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:30,667 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:30,667 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing c55f93e6a1eafc37c98afe2be6853b3f, disabling compactions & flushes 2024-12-02T04:26:30,668 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:30,668 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:30,668 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
after waiting 0 ms 2024-12-02T04:26:30,668 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 2024-12-02T04:26:30,669 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:30,669 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:30,670 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 87d741e5cfd316e6fc89dfc072993c57, disabling compactions & flushes 2024-12-02T04:26:30,670 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:30,670 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:30,670 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. after waiting 0 ms 2024-12-02T04:26:30,670 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:30,673 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:30,674 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:30,674 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f. 
2024-12-02T04:26:30,674 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for c55f93e6a1eafc37c98afe2be6853b3f: Waiting for close lock at 1733113590667Running coprocessor pre-close hooks at 1733113590667Disabling compacts and flushes for region at 1733113590667Disabling writes for close at 1733113590668 (+1 ms)Writing region close event to WAL at 1733113590668Running coprocessor post-close hooks at 1733113590674 (+6 ms)Closed at 1733113590674 2024-12-02T04:26:30,674 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:30,675 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:30,675 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57. 2024-12-02T04:26:30,675 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 87d741e5cfd316e6fc89dfc072993c57: Waiting for close lock at 1733113590670Running coprocessor pre-close hooks at 1733113590670Disabling compacts and flushes for region at 1733113590670Disabling writes for close at 1733113590670Writing region close event to WAL at 1733113590670Running coprocessor post-close hooks at 1733113590675 (+5 ms)Closed at 1733113590675 2024-12-02T04:26:30,676 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:30,677 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=c55f93e6a1eafc37c98afe2be6853b3f, regionState=CLOSED 2024-12-02T04:26:30,677 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:30,677 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=87d741e5cfd316e6fc89dfc072993c57, regionState=CLOSED 2024-12-02T04:26:30,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:30,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87d741e5cfd316e6fc89dfc072993c57, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:30,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-12-02T04:26:30,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, 
hasLock=false; CloseRegionProcedure c55f93e6a1eafc37c98afe2be6853b3f, server=e2eaa0f11f7e,41643,1733113342088 in 171 msec 2024-12-02T04:26:30,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c55f93e6a1eafc37c98afe2be6853b3f, UNASSIGN in 177 msec 2024-12-02T04:26:30,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-12-02T04:26:30,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 87d741e5cfd316e6fc89dfc072993c57, server=e2eaa0f11f7e,37413,1733113342217 in 166 msec 2024-12-02T04:26:30,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-02T04:26:30,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=87d741e5cfd316e6fc89dfc072993c57, UNASSIGN in 179 msec 2024-12-02T04:26:30,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-02T04:26:30,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 184 msec 2024-12-02T04:26:30,690 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113590690"}]},"ts":"1733113590690"} 2024-12-02T04:26:30,691 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-02T04:26:30,692 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-02T04:26:30,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 197 msec 2024-12-02T04:26:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T04:26:30,818 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T04:26:30,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,820 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,820 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,820 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,822 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,823 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:30,823 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:30,825 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/recovered.edits] 2024-12-02T04:26:30,825 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/recovered.edits] 2024-12-02T04:26:30,827 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf/03923bd289ad437fa3de9fceefdc8a21 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/cf/03923bd289ad437fa3de9fceefdc8a21 2024-12-02T04:26:30,827 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf/dd2f4d65eb3b40d19cc2dc2a6368158a to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/cf/dd2f4d65eb3b40d19cc2dc2a6368158a 2024-12-02T04:26:30,829 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57/recovered.edits/9.seqid 2024-12-02T04:26:30,829 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f/recovered.edits/9.seqid 2024-12-02T04:26:30,829 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:30,829 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithMergeRegion/c55f93e6a1eafc37c98afe2be6853b3f 2024-12-02T04:26:30,829 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-02T04:26:30,830 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-02T04:26:30,830 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-02T04:26:30,833 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412028b4e4397d16d4accb70f19891e5b39af_87d741e5cfd316e6fc89dfc072993c57 2024-12-02T04:26:30,834 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202346badb13fe042f293e21e903b0bece6_c55f93e6a1eafc37c98afe2be6853b3f 
2024-12-02T04:26:30,834 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-02T04:26:30,853 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,856 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-02T04:26:30,858 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-02T04:26:30,859 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,860 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-02T04:26:30,860 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113590860"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:30,860 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113590860"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:30,862 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:26:30,862 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c55f93e6a1eafc37c98afe2be6853b3f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733113568865.c55f93e6a1eafc37c98afe2be6853b3f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 87d741e5cfd316e6fc89dfc072993c57, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733113568865.87d741e5cfd316e6fc89dfc072993c57.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:26:30,862 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-12-02T04:26:30,862 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113590862"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:30,864 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-02T04:26:30,865 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 47 msec 2024-12-02T04:26:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T04:26:30,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T04:26:30,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T04:26:30,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T04:26:30,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:30,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-02T04:26:30,902 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,902 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T04:26:30,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-02T04:26:30,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,911 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-02T04:26:30,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:30,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-02T04:26:30,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:30,932 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=817 (was 811) 
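The entries above record the teardown of the previous test case: DisableTableProcedure pid=162, DeleteTableProcedure pid=168 (HFile archiving, hbase:meta cleanup, ACL znode removal), and the deletion of three snapshots. As a rough, illustrative sketch only -- not the actual TestMobSecureExportSnapshot code -- the client-side Admin calls that would drive such a sequence look roughly like this; the table and snapshot names are taken from the log, everything else (connection setup, class name, error handling) is an assumption:

    // Illustrative sketch only: an approximation of the client-side calls
    // behind the DISABLE / DELETE / deleteSnapshot entries logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TeardownSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table =
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          admin.disableTable(table);  // DisableTableProcedure, pid=162 in the log
          admin.deleteTable(table);   // DeleteTableProcedure, pid=168: archives HFiles,
                                      // removes regions from hbase:meta, drops the ACL znode
          // The three snapshot deletions handled by SnapshotManager(381) above
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
        }
      }
    }
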
Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:58572 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-614462892_1 at /127.0.0.1:60500 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:38907 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:57764 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 143626) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:60532 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40515 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6313 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=818 (was 799) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=796 (was 904), ProcessCount=17 (was 20), AvailableMemoryMB=2451 (was 2571) 2024-12-02T04:26:30,932 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-02T04:26:30,946 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=817, OpenFileDescriptor=818, MaxFileDescriptor=1048576, SystemLoadAverage=796, ProcessCount=17, AvailableMemoryMB=2451 2024-12-02T04:26:30,946 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-02T04:26:30,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:26:30,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:30,949 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:26:30,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-02T04:26:30,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T04:26:30,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:26:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742242_1418 (size=443) 2024-12-02T04:26:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742242_1418 (size=443) 2024-12-02T04:26:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742242_1418 (size=443) 2024-12-02T04:26:30,957 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1a7d4e8475fbf118af165732278e70ec, NAME => 'testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:30,958 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e157d59036ec75da7d8bb22d76140eb3, NAME => 'testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:30,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742243_1419 (size=68) 2024-12-02T04:26:30,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742243_1419 (size=68) 2024-12-02T04:26:30,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742243_1419 (size=68) 2024-12-02T04:26:30,964 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:30,964 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 1a7d4e8475fbf118af165732278e70ec, disabling compactions & flushes 2024-12-02T04:26:30,964 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:30,964 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:30,964 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. after waiting 0 ms 2024-12-02T04:26:30,964 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
2024-12-02T04:26:30,964 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:30,964 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1a7d4e8475fbf118af165732278e70ec: Waiting for close lock at 1733113590964Disabling compacts and flushes for region at 1733113590964Disabling writes for close at 1733113590964Writing region close event to WAL at 1733113590964Closed at 1733113590964 2024-12-02T04:26:30,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742244_1420 (size=68) 2024-12-02T04:26:30,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742244_1420 (size=68) 2024-12-02T04:26:30,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742244_1420 (size=68) 2024-12-02T04:26:30,967 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:30,967 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing e157d59036ec75da7d8bb22d76140eb3, disabling compactions & flushes 2024-12-02T04:26:30,967 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:30,967 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:30,967 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. after waiting 0 ms 2024-12-02T04:26:30,967 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:30,967 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 
2024-12-02T04:26:30,967 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for e157d59036ec75da7d8bb22d76140eb3: Waiting for close lock at 1733113590967Disabling compacts and flushes for region at 1733113590967Disabling writes for close at 1733113590967Writing region close event to WAL at 1733113590967Closed at 1733113590967 2024-12-02T04:26:30,968 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:26:30,969 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733113590968"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113590968"}]},"ts":"1733113590968"} 2024-12-02T04:26:30,969 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733113590968"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113590968"}]},"ts":"1733113590968"} 2024-12-02T04:26:30,971 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:26:30,972 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:26:30,972 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113590972"}]},"ts":"1733113590972"} 2024-12-02T04:26:30,974 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-02T04:26:30,975 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:26:30,976 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:26:30,976 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:26:30,976 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:26:30,976 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:26:30,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, ASSIGN}] 2024-12-02T04:26:30,978 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, ASSIGN 2024-12-02T04:26:30,978 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, ASSIGN 2024-12-02T04:26:30,979 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:26:30,979 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:26:31,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T04:26:31,129 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T04:26:31,129 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=1a7d4e8475fbf118af165732278e70ec, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:31,129 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=e157d59036ec75da7d8bb22d76140eb3, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:31,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, ASSIGN because future has completed 2024-12-02T04:26:31,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure e157d59036ec75da7d8bb22d76140eb3, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:31,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, ASSIGN because future has completed 2024-12-02T04:26:31,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1a7d4e8475fbf118af165732278e70ec, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T04:26:31,286 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:31,286 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => e157d59036ec75da7d8bb22d76140eb3, NAME => 'testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:26:31,287 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => 1a7d4e8475fbf118af165732278e70ec, NAME => 'testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. service=AccessControlService 2024-12-02T04:26:31,287 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. service=AccessControlService 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,287 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,289 INFO [StoreOpener-1a7d4e8475fbf118af165732278e70ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,289 INFO [StoreOpener-e157d59036ec75da7d8bb22d76140eb3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,290 INFO [StoreOpener-e157d59036ec75da7d8bb22d76140eb3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e157d59036ec75da7d8bb22d76140eb3 columnFamilyName cf 2024-12-02T04:26:31,290 INFO [StoreOpener-1a7d4e8475fbf118af165732278e70ec-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a7d4e8475fbf118af165732278e70ec columnFamilyName cf 2024-12-02T04:26:31,291 DEBUG [StoreOpener-1a7d4e8475fbf118af165732278e70ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:31,291 DEBUG [StoreOpener-e157d59036ec75da7d8bb22d76140eb3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:31,291 INFO [StoreOpener-e157d59036ec75da7d8bb22d76140eb3-1 {}] regionserver.HStore(327): Store=e157d59036ec75da7d8bb22d76140eb3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:31,291 INFO [StoreOpener-1a7d4e8475fbf118af165732278e70ec-1 {}] regionserver.HStore(327): Store=1a7d4e8475fbf118af165732278e70ec/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:31,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3 
2024-12-02T04:26:31,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,294 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,294 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,295 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:31,295 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:31,296 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened 1a7d4e8475fbf118af165732278e70ec; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70245745, jitterRate=0.04674316942691803}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:31,296 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened e157d59036ec75da7d8bb22d76140eb3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71271841, jitterRate=0.062033191323280334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:31,296 DEBUG 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,296 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,296 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for e157d59036ec75da7d8bb22d76140eb3: Running coprocessor pre-open hook at 1733113591287Writing region info on filesystem at 1733113591288 (+1 ms)Initializing all the Stores at 1733113591288Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113591288Cleaning up temporary data from old regions at 1733113591293 (+5 ms)Running coprocessor post-open hooks at 1733113591296 (+3 ms)Region opened successfully at 1733113591296 2024-12-02T04:26:31,296 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for 1a7d4e8475fbf118af165732278e70ec: Running coprocessor pre-open hook at 1733113591288Writing region info on filesystem at 1733113591288Initializing all the Stores at 1733113591288Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113591288Cleaning up temporary data from old regions at 1733113591293 (+5 ms)Running coprocessor post-open hooks at 1733113591296 (+3 ms)Region opened successfully at 1733113591296 2024-12-02T04:26:31,297 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec., pid=173, masterSystemTime=1733113591284 2024-12-02T04:26:31,297 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3., pid=172, masterSystemTime=1733113591283 2024-12-02T04:26:31,298 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:31,298 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
2024-12-02T04:26:31,299 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=1a7d4e8475fbf118af165732278e70ec, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:31,299 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:31,299 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:31,300 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=e157d59036ec75da7d8bb22d76140eb3, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:31,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1a7d4e8475fbf118af165732278e70ec, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:31,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure e157d59036ec75da7d8bb22d76140eb3, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:31,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-02T04:26:31,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 1a7d4e8475fbf118af165732278e70ec, server=e2eaa0f11f7e,41643,1733113342088 in 169 msec 2024-12-02T04:26:31,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-02T04:26:31,303 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, ASSIGN in 326 msec 2024-12-02T04:26:31,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure e157d59036ec75da7d8bb22d76140eb3, server=e2eaa0f11f7e,37413,1733113342217 in 170 msec 2024-12-02T04:26:31,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-02T04:26:31,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, ASSIGN in 327 msec 2024-12-02T04:26:31,306 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:26:31,306 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113591306"}]},"ts":"1733113591306"} 
2024-12-02T04:26:31,307 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-02T04:26:31,308 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:26:31,308 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-02T04:26:31,311 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T04:26:31,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:31,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:31,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:31,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:31,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:31,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:31,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:31,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:31,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 404 msec 2024-12-02T04:26:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T04:26:31,578 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
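[editor note] The entries above trace the creation of testtb-testExportExpiredSnapshot: a single MOB-enabled family 'cf' with one version, two regions (split at row key "1") opened by pids 172/173, and the jenkins RWXCA ACL written and propagated over ZooKeeper. A minimal client-side sketch of an equivalent table creation follows; it assumes a standard Configuration/Connection and is an illustration, not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateSnapshotTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // Single MOB-enabled family 'cf' with one version, mirroring the descriptor logged above.
      ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(0L);
      // One split key ("1") yields the two regions seen in the open/assign procedures above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf.build()).build(),
          splitKeys);
    }
  }
}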
2024-12-02T04:26:31,578 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:31,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-02T04:26:31,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:31,581 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:31,582 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:31,588 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:31,594 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:31,596 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T04:26:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113591596 (current time:1733113591596). 
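[editor note] The repeated META scans and the "Found 2 regions for table" check above are the test verifying the region layout before snapshotting. A small sketch of how that lookup can be done through the public client API, assuming the same Connection as before (illustrative only, not the test's helper):

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionLayoutCheck {
  static void printRegions(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      // Resolves every region of the table from hbase:meta, like the scans logged above.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("Found " + locations.size() + " regions for " + tn);
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getHostname() + ":" + loc.getPort());
      }
    }
  }
}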
2024-12-02T04:26:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T04:26:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2adac57f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:31,598 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:31,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:31,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:31,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53d166c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:31,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:31,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:31,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:31,600 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:31,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e3eaf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:31,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:31,601 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:31,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:31,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:31,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:31,604 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:26:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b325867, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:31,606 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70e40619, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:31,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:31,607 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:31,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4925a9d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:31,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:31,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:31,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:31,610 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38180, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T04:26:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:31,613 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:31,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:31,614 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:31,614 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T04:26:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:26:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T04:26:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-02T04:26:31,617 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T04:26:31,617 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:31,620 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:31,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742245_1421 (size=170) 2024-12-02T04:26:31,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742245_1421 (size=170) 2024-12-02T04:26:31,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742245_1421 (size=170) 2024-12-02T04:26:31,625 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-02T04:26:31,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3}] 2024-12-02T04:26:31,626 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,626 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-02T04:26:31,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-02T04:26:31,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T04:26:31,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T04:26:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T04:26:31,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-02T04:26:31,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-02T04:26:31,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:31,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 
2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for e157d59036ec75da7d8bb22d76140eb3: 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 1a7d4e8475fbf118af165732278e70ec: 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:31,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:31,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742246_1422 (size=71) 2024-12-02T04:26:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742246_1422 (size=71) 2024-12-02T04:26:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742247_1423 (size=71) 2024-12-02T04:26:31,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
2024-12-02T04:26:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742247_1423 (size=71) 2024-12-02T04:26:31,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-02T04:26:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742247_1423 (size=71) 2024-12-02T04:26:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742246_1422 (size=71) 2024-12-02T04:26:31,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:31,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-02T04:26:31,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-02T04:26:31,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-02T04:26:31,786 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,786 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:31,786 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:31,787 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3 in 161 msec 2024-12-02T04:26:31,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-02T04:26:31,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec in 162 msec 2024-12-02T04:26:31,788 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:31,789 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, 
snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:31,790 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T04:26:31,790 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:31,790 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:31,790 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:26:31,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742248_1424 (size=63) 2024-12-02T04:26:31,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742248_1424 (size=63) 2024-12-02T04:26:31,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742248_1424 (size=63) 2024-12-02T04:26:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T04:26:32,202 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:32,202 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,203 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742249_1425 (size=653) 2024-12-02T04:26:32,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742249_1425 (size=653) 2024-12-02T04:26:32,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742249_1425 (size=653) 2024-12-02T04:26:32,220 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:32,231 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:32,231 DEBUG [PEWorker-5 {}] 
snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,234 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:32,234 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-02T04:26:32,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 619 msec 2024-12-02T04:26:32,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T04:26:32,248 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T04:26:32,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:32,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:32,257 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:32,259 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-02T04:26:32,259 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
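[editor note] The completed SNAPSHOT operation above (emptySnaptb0-testExportExpiredSnapshot, type=FLUSH) and the subsequent writes logged "with WAL disabled" correspond to a short client-side sequence. The sketch below assumes an open Connection; names and row/column values are illustrative, not copied from the test source. Skipping the WAL on the Put is what produces the "Data may be lost in the event of a crash" warning seen in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SnapshotThenWrite {
  static void snapshotThenWrite(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) {
      // FLUSH-type snapshot of the still-empty table, as requested via the master RPC above.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot", tn, SnapshotType.FLUSH);
      // Write with the WAL skipped; this is what triggers the "Data may be lost" warning.
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
          .setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}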
2024-12-02T04:26:32,260 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:32,261 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:32,267 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:32,274 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:32,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T04:26:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113592278 (current time:1733113592278). 2024-12-02T04:26:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T04:26:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5648559a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:32,280 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:32,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:32,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:32,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce06ff5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-02T04:26:32,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:32,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:32,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:32,281 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:32,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d6c0a3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:32,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:32,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:32,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:32,284 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33024, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:32,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:32,285 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:32,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d221165, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:32,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:32,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:32,287 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@588da70f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:32,288 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37270, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:32,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a252d67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:32,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:32,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:32,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:32,290 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:32,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:32,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:32,293 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:32,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:32,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:32,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:32,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:32,294 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:32,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T04:26:32,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
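[Illustrative sketch, not part of the test log: the entries above show the master validating the snapshot request and reading the ACL for 'testtb-testExportExpiredSnapshot'; the entries below show it executing SnapshotProcedure pid=177 for the FLUSH snapshot 'snaptb0-testExportExpiredSnapshot'. A client-side request like the following, using the public HBase Admin API, is what typically drives this sequence. The class name and the default HBaseConfiguration are assumptions for illustration; the snapshot name, table name and FLUSH type are taken from the log.]

    // Minimal sketch (not the test's actual code): requesting a FLUSH-type snapshot
    // through the public HBase Admin API. Configuration and resource handling here
    // are illustrative assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure (the pid=177 sequence in the
          // log) completes; the client library polls "is procedure done" internally,
          // which is what the repeated MasterRpcServices(1377) entries correspond to.
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }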
2024-12-02T04:26:32,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T04:26:32,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-02T04:26:32,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:32,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T04:26:32,297 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:32,299 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:32,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742250_1426 (size=165) 2024-12-02T04:26:32,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742250_1426 (size=165) 2024-12-02T04:26:32,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742250_1426 (size=165) 2024-12-02T04:26:32,308 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:32,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3}] 2024-12-02T04:26:32,309 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:32,309 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:32,408 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T04:26:32,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-02T04:26:32,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-02T04:26:32,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:32,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:32,461 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 1a7d4e8475fbf118af165732278e70ec 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T04:26:32,461 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing e157d59036ec75da7d8bb22d76140eb3 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T04:26:32,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec is 71, key is 078fa015414bdce2f8d17141ea6daa43/cf:q/1733113592253/Put/seqid=0 2024-12-02T04:26:32,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3 is 71, key is 1e30199cf00d7b73f66c55a448ea8d40/cf:q/1733113592255/Put/seqid=0 2024-12-02T04:26:32,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742251_1427 (size=5102) 2024-12-02T04:26:32,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742251_1427 (size=5102) 2024-12-02T04:26:32,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742251_1427 (size=5102) 2024-12-02T04:26:32,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:32,501 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:32,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742252_1428 (size=8172) 2024-12-02T04:26:32,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742252_1428 (size=8172) 2024-12-02T04:26:32,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/.tmp/cf/3e286fb6f4b441c4835c8ba6cbc0b658, store: [table=testtb-testExportExpiredSnapshot family=cf region=1a7d4e8475fbf118af165732278e70ec] 2024-12-02T04:26:32,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/.tmp/cf/3e286fb6f4b441c4835c8ba6cbc0b658 is 209, key is 0a415a53530e65e147d1f1a95f1578a37/cf:q/1733113592253/Put/seqid=0 2024-12-02T04:26:32,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742252_1428 (size=8172) 2024-12-02T04:26:32,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:32,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:32,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/.tmp/cf/6365a9a6b9a740958d9c5cf5142398bc, store: [table=testtb-testExportExpiredSnapshot family=cf region=e157d59036ec75da7d8bb22d76140eb3] 2024-12-02T04:26:32,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/.tmp/cf/6365a9a6b9a740958d9c5cf5142398bc is 209, key is 15abb614c3d0a59326065af875e41b95f/cf:q/1733113592255/Put/seqid=0 2024-12-02T04:26:32,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742253_1429 (size=5918) 2024-12-02T04:26:32,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742253_1429 (size=5918) 2024-12-02T04:26:32,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742253_1429 (size=5918) 2024-12-02T04:26:32,512 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/.tmp/cf/3e286fb6f4b441c4835c8ba6cbc0b658 2024-12-02T04:26:32,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742254_1430 (size=14999) 2024-12-02T04:26:32,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742254_1430 (size=14999) 2024-12-02T04:26:32,518 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/.tmp/cf/6365a9a6b9a740958d9c5cf5142398bc 2024-12-02T04:26:32,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742254_1430 (size=14999) 2024-12-02T04:26:32,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/.tmp/cf/3e286fb6f4b441c4835c8ba6cbc0b658 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf/3e286fb6f4b441c4835c8ba6cbc0b658 2024-12-02T04:26:32,523 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf/3e286fb6f4b441c4835c8ba6cbc0b658, entries=3, sequenceid=6, filesize=5.8 K 2024-12-02T04:26:32,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/.tmp/cf/6365a9a6b9a740958d9c5cf5142398bc as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf/6365a9a6b9a740958d9c5cf5142398bc 2024-12-02T04:26:32,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 1a7d4e8475fbf118af165732278e70ec in 63ms, sequenceid=6, compaction requested=false 2024-12-02T04:26:32,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-02T04:26:32,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 1a7d4e8475fbf118af165732278e70ec: 2024-12-02T04:26:32,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. for snaptb0-testExportExpiredSnapshot completed. 2024-12-02T04:26:32,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf/3e286fb6f4b441c4835c8ba6cbc0b658] hfiles 2024-12-02T04:26:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf/3e286fb6f4b441c4835c8ba6cbc0b658 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,531 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf/6365a9a6b9a740958d9c5cf5142398bc, entries=47, sequenceid=6, filesize=14.6 K 2024-12-02T04:26:32,532 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for e157d59036ec75da7d8bb22d76140eb3 in 71ms, sequenceid=6, 
compaction requested=false 2024-12-02T04:26:32,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for e157d59036ec75da7d8bb22d76140eb3: 2024-12-02T04:26:32,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. for snaptb0-testExportExpiredSnapshot completed. 2024-12-02T04:26:32,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:32,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf/6365a9a6b9a740958d9c5cf5142398bc] hfiles 2024-12-02T04:26:32,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf/6365a9a6b9a740958d9c5cf5142398bc for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742255_1431 (size=110) 2024-12-02T04:26:32,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742255_1431 (size=110) 2024-12-02T04:26:32,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742255_1431 (size=110) 2024-12-02T04:26:32,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
2024-12-02T04:26:32,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-02T04:26:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-02T04:26:32,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:32,541 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:32,543 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1a7d4e8475fbf118af165732278e70ec in 234 msec 2024-12-02T04:26:32,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742256_1432 (size=110) 2024-12-02T04:26:32,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742256_1432 (size=110) 2024-12-02T04:26:32,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742256_1432 (size=110) 2024-12-02T04:26:32,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 
2024-12-02T04:26:32,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-02T04:26:32,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-02T04:26:32,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:32,554 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:32,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-02T04:26:32,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e157d59036ec75da7d8bb22d76140eb3 in 247 msec 2024-12-02T04:26:32,556 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:32,557 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:32,558 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:26:32,558 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:32,558 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:32,559 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec] hfiles 2024-12-02T04:26:32,559 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:32,559 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742257_1433 (size=294) 2024-12-02T04:26:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742257_1433 (size=294) 2024-12-02T04:26:32,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742257_1433 (size=294) 2024-12-02T04:26:32,569 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:32,570 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,570 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742258_1434 (size=963) 2024-12-02T04:26:32,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742258_1434 (size=963) 2024-12-02T04:26:32,578 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742258_1434 (size=963) 2024-12-02T04:26:32,581 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:32,586 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:32,587 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:32,588 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:32,588 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-02T04:26:32,598 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 293 msec 2024-12-02T04:26:32,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T04:26:32,618 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T04:26:32,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:26:32,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-02T04:26:32,622 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:26:32,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-02T04:26:32,623 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:26:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T04:26:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742259_1435 (size=436) 2024-12-02T04:26:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742259_1435 (size=436) 2024-12-02T04:26:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742259_1435 (size=436) 2024-12-02T04:26:32,634 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 37a29921e433fcac8df396b670af8f39, NAME => 'testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:32,634 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d95ecee29e5f13e9036a6e1711d1a0f3, NAME => 'testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:32,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742260_1436 (size=61) 2024-12-02T04:26:32,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742260_1436 (size=61) 2024-12-02T04:26:32,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38471 is added to blk_1073742260_1436 (size=61) 2024-12-02T04:26:32,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742261_1437 (size=61) 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 37a29921e433fcac8df396b670af8f39, disabling compactions & flushes 2024-12-02T04:26:32,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742261_1437 (size=61) 2024-12-02T04:26:32,646 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:32,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742261_1437 (size=61) 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. after waiting 0 ms 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:32,646 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 37a29921e433fcac8df396b670af8f39: Waiting for close lock at 1733113592646Disabling compacts and flushes for region at 1733113592646Disabling writes for close at 1733113592646Writing region close event to WAL at 1733113592646Closed at 1733113592646 2024-12-02T04:26:32,646 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:32,647 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing d95ecee29e5f13e9036a6e1711d1a0f3, disabling compactions & flushes 2024-12-02T04:26:32,647 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:32,647 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 
2024-12-02T04:26:32,647 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. after waiting 0 ms 2024-12-02T04:26:32,647 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:32,647 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:32,647 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for d95ecee29e5f13e9036a6e1711d1a0f3: Waiting for close lock at 1733113592646Disabling compacts and flushes for region at 1733113592646Disabling writes for close at 1733113592647 (+1 ms)Writing region close event to WAL at 1733113592647Closed at 1733113592647 2024-12-02T04:26:32,648 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:26:32,648 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733113592648"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113592648"}]},"ts":"1733113592648"} 2024-12-02T04:26:32,648 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733113592648"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113592648"}]},"ts":"1733113592648"} 2024-12-02T04:26:32,650 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
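[Illustrative sketch, not part of the test log: the preceding entries show CreateTableProcedure pid=180 creating 'testExportExpiredSnapshot' with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'), writing the FS layout and adding both regions to hbase:meta. A hedged client-side equivalent using the public descriptor builders might look like the following; the class name and connection setup are assumptions, while the table name, family name and MOB settings come from the create statement logged above.]

    // Minimal sketch (assumed client-side equivalent, not the test's code) of creating
    // a table like 'testExportExpiredSnapshot' with a MOB-enabled 'cf' family.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // MOB_THRESHOLD => '0' means cells in 'cf' are written as MOB files under
          // mobdir/ rather than inline in regular HFiles.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build();
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
              .setColumnFamily(cf)
              .build();
          // Drives the CreateTableProcedure sequence seen in the log (pid=180):
          // write FS layout, add regions to hbase:meta, then assign the regions.
          admin.createTable(table);
        }
      }
    }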
2024-12-02T04:26:32,651 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:26:32,651 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113592651"}]},"ts":"1733113592651"} 2024-12-02T04:26:32,653 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-02T04:26:32,654 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:26:32,655 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:26:32,655 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:26:32,655 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:26:32,655 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:26:32,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d95ecee29e5f13e9036a6e1711d1a0f3, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=37a29921e433fcac8df396b670af8f39, ASSIGN}] 2024-12-02T04:26:32,657 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=37a29921e433fcac8df396b670af8f39, ASSIGN 2024-12-02T04:26:32,657 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d95ecee29e5f13e9036a6e1711d1a0f3, ASSIGN 2024-12-02T04:26:32,657 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d95ecee29e5f13e9036a6e1711d1a0f3, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:26:32,657 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=37a29921e433fcac8df396b670af8f39, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:26:32,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T04:26:32,808 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T04:26:32,808 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=d95ecee29e5f13e9036a6e1711d1a0f3, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:32,808 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=37a29921e433fcac8df396b670af8f39, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:32,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d95ecee29e5f13e9036a6e1711d1a0f3, ASSIGN because future has completed 2024-12-02T04:26:32,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:32,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=37a29921e433fcac8df396b670af8f39, ASSIGN because future has completed 2024-12-02T04:26:32,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 37a29921e433fcac8df396b670af8f39, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:32,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T04:26:32,965 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:32,965 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 
2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => d95ecee29e5f13e9036a6e1711d1a0f3, NAME => 'testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 37a29921e433fcac8df396b670af8f39, NAME => 'testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. service=AccessControlService 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. service=AccessControlService 2024-12-02T04:26:32,965 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:26:32,965 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,965 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 
37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,966 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,967 INFO [StoreOpener-37a29921e433fcac8df396b670af8f39-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,967 INFO [StoreOpener-d95ecee29e5f13e9036a6e1711d1a0f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,968 INFO [StoreOpener-37a29921e433fcac8df396b670af8f39-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 37a29921e433fcac8df396b670af8f39 columnFamilyName cf 2024-12-02T04:26:32,968 INFO [StoreOpener-d95ecee29e5f13e9036a6e1711d1a0f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d95ecee29e5f13e9036a6e1711d1a0f3 columnFamilyName cf 2024-12-02T04:26:32,969 DEBUG [StoreOpener-d95ecee29e5f13e9036a6e1711d1a0f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:32,969 DEBUG [StoreOpener-37a29921e433fcac8df396b670af8f39-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:32,969 INFO [StoreOpener-d95ecee29e5f13e9036a6e1711d1a0f3-1 {}] regionserver.HStore(327): Store=d95ecee29e5f13e9036a6e1711d1a0f3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:32,969 INFO [StoreOpener-37a29921e433fcac8df396b670af8f39-1 {}] regionserver.HStore(327): Store=37a29921e433fcac8df396b670af8f39/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:32,969 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,969 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,970 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,972 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,972 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,973 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:32,974 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:32,974 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 37a29921e433fcac8df396b670af8f39; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74824072, jitterRate=0.11496555805206299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:32,974 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:32,974 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened d95ecee29e5f13e9036a6e1711d1a0f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64764341, jitterRate=-0.03493611514568329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:32,974 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:32,974 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 37a29921e433fcac8df396b670af8f39: Running coprocessor pre-open hook at 1733113592966Writing region info on filesystem at 1733113592966Initializing all the Stores at 1733113592966Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113592966Cleaning up temporary data from old regions at 1733113592970 (+4 ms)Running coprocessor post-open hooks at 1733113592974 (+4 ms)Region opened successfully at 1733113592974 2024-12-02T04:26:32,974 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for d95ecee29e5f13e9036a6e1711d1a0f3: Running coprocessor pre-open hook at 1733113592966Writing region info on filesystem at 1733113592966Initializing all the Stores at 1733113592966Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113592966Cleaning up temporary data from old regions at 1733113592970 (+4 ms)Running coprocessor post-open hooks at 1733113592974 (+4 ms)Region opened successfully at 1733113592974 2024-12-02T04:26:32,975 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39., pid=184, masterSystemTime=1733113592962 2024-12-02T04:26:32,975 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3., pid=183, masterSystemTime=1733113592962 2024-12-02T04:26:32,977 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:32,977 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:32,977 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=37a29921e433fcac8df396b670af8f39, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:32,977 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:32,977 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:32,978 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=d95ecee29e5f13e9036a6e1711d1a0f3, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:32,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 37a29921e433fcac8df396b670af8f39, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:32,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:32,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-12-02T04:26:32,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 37a29921e433fcac8df396b670af8f39, server=e2eaa0f11f7e,41643,1733113342088 in 168 msec 2024-12-02T04:26:32,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-12-02T04:26:32,982 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=37a29921e433fcac8df396b670af8f39, ASSIGN in 326 msec 2024-12-02T04:26:32,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3, server=e2eaa0f11f7e,37413,1733113342217 in 171 msec 2024-12-02T04:26:32,984 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-02T04:26:32,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d95ecee29e5f13e9036a6e1711d1a0f3, ASSIGN in 327 msec 2024-12-02T04:26:32,985 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:26:32,985 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113592985"}]},"ts":"1733113592985"} 2024-12-02T04:26:32,987 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-02T04:26:32,988 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:26:32,988 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-02T04:26:32,991 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T04:26:33,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:33,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:33,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:33,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:33,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,102 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:33,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 481 msec 2024-12-02T04:26:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T04:26:33,248 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-02T04:26:33,248 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-02T04:26:33,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:33,251 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:33,252 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,257 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,262 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. with WAL disabled. 
Data may be lost in the event of a crash. 2024-12-02T04:26:33,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:33,273 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,276 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-02T04:26:33,276 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:33,276 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:33,278 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,283 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T04:26:33,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-02T04:26:33,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T04:26:33,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a0f3e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:33,291 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:33,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:33,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:33,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769f35a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:33,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:33,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:33,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:33,292 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:33,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a8a735b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:33,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:33,295 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:33,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:33,296 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b3eba2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:33,298 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:33,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:33,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:33,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49d78a2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:33,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:33,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:33,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:33,300 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37302, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:33,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@786be000, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:33,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:33,304 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:33,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:33,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:33,306 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58500, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:33,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:33,307 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T04:26:33,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T04:26:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-02T04:26:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-02T04:26:33,310 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T04:26:33,310 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:33,312 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:33,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742262_1438 (size=152) 2024-12-02T04:26:33,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742262_1438 (size=152) 2024-12-02T04:26:33,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742262_1438 (size=152) 2024-12-02T04:26:33,318 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:33,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 37a29921e433fcac8df396b670af8f39}] 2024-12-02T04:26:33,319 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:33,319 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:33,417 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T04:26:33,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-02T04:26:33,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-02T04:26:33,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:26:33,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:33,470 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing d95ecee29e5f13e9036a6e1711d1a0f3 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-02T04:26:33,470 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 37a29921e433fcac8df396b670af8f39 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-02T04:26:33,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025c6f77b1e4ad4ed3bcad5051c58da438_d95ecee29e5f13e9036a6e1711d1a0f3 is 71, key is 04e717732e120b635a5fd95b42d3ad6d/cf:q/1733113593270/Put/seqid=0 2024-12-02T04:26:33,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202af1e76cf6aba4de7819f6aaf9afaa424_37a29921e433fcac8df396b670af8f39 is 71, key is 18b9bcb2e8fc7f2d109c4004b3717df0/cf:q/1733113593271/Put/seqid=0 2024-12-02T04:26:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742264_1440 (size=8031) 2024-12-02T04:26:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742264_1440 (size=8031) 2024-12-02T04:26:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742263_1439 (size=5241) 2024-12-02T04:26:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742264_1440 (size=8031) 2024-12-02T04:26:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742263_1439 (size=5241) 2024-12-02T04:26:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40371 is added to blk_1073742263_1439 (size=5241) 2024-12-02T04:26:33,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:33,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:33,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412025c6f77b1e4ad4ed3bcad5051c58da438_d95ecee29e5f13e9036a6e1711d1a0f3 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e202412025c6f77b1e4ad4ed3bcad5051c58da438_d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:33,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202af1e76cf6aba4de7819f6aaf9afaa424_37a29921e433fcac8df396b670af8f39 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241202af1e76cf6aba4de7819f6aaf9afaa424_37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:33,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/.tmp/cf/fa570fd051f242928f23c19862dbc71e, store: [table=testExportExpiredSnapshot family=cf region=d95ecee29e5f13e9036a6e1711d1a0f3] 2024-12-02T04:26:33,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/.tmp/cf/31213878af0048b1be6698aa021192a7, store: [table=testExportExpiredSnapshot family=cf region=37a29921e433fcac8df396b670af8f39] 2024-12-02T04:26:33,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/.tmp/cf/fa570fd051f242928f23c19862dbc71e is 202, key is 0a5bf796efd5f457170a8fd3af5a44b1b/cf:q/1733113593270/Put/seqid=0 2024-12-02T04:26:33,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/.tmp/cf/31213878af0048b1be6698aa021192a7 is 202, key is 1fea1eb51d9897809d67c7d62c38da294/cf:q/1733113593271/Put/seqid=0 2024-12-02T04:26:33,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742266_1442 (size=6284) 2024-12-02T04:26:33,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742265_1441 (size=14265) 2024-12-02T04:26:33,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742266_1442 (size=6284) 2024-12-02T04:26:33,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742266_1442 (size=6284) 2024-12-02T04:26:33,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742265_1441 (size=14265) 2024-12-02T04:26:33,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742265_1441 (size=14265) 2024-12-02T04:26:33,499 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/.tmp/cf/fa570fd051f242928f23c19862dbc71e 2024-12-02T04:26:33,499 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/.tmp/cf/31213878af0048b1be6698aa021192a7 2024-12-02T04:26:33,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/.tmp/cf/fa570fd051f242928f23c19862dbc71e as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/cf/fa570fd051f242928f23c19862dbc71e 2024-12-02T04:26:33,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/.tmp/cf/31213878af0048b1be6698aa021192a7 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/cf/31213878af0048b1be6698aa021192a7 2024-12-02T04:26:33,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/cf/31213878af0048b1be6698aa021192a7, entries=45, sequenceid=5, filesize=13.9 K 2024-12-02T04:26:33,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/cf/fa570fd051f242928f23c19862dbc71e, entries=5, sequenceid=5, filesize=6.1 K 2024-12-02T04:26:33,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for d95ecee29e5f13e9036a6e1711d1a0f3 in 39ms, sequenceid=5, compaction requested=false 2024-12-02T04:26:33,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 37a29921e433fcac8df396b670af8f39 in 39ms, sequenceid=5, compaction requested=false 2024-12-02T04:26:33,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-02T04:26:33,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-02T04:26:33,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 37a29921e433fcac8df396b670af8f39: 2024-12-02T04:26:33,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for d95ecee29e5f13e9036a6e1711d1a0f3: 2024-12-02T04:26:33,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. for snapshot-testExportExpiredSnapshot completed. 2024-12-02T04:26:33,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. for snapshot-testExportExpiredSnapshot completed. 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/cf/31213878af0048b1be6698aa021192a7] hfiles 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/cf/fa570fd051f242928f23c19862dbc71e] hfiles 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/cf/fa570fd051f242928f23c19862dbc71e for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/cf/31213878af0048b1be6698aa021192a7 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742268_1444 (size=103) 2024-12-02T04:26:33,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742268_1444 (size=103) 2024-12-02T04:26:33,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742268_1444 (size=103) 2024-12-02T04:26:33,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 
2024-12-02T04:26:33,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-02T04:26:33,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-02T04:26:33,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:33,517 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:33,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742267_1443 (size=103) 2024-12-02T04:26:33,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742267_1443 (size=103) 2024-12-02T04:26:33,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742267_1443 (size=103) 2024-12-02T04:26:33,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:26:33,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-02T04:26:33,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-02T04:26:33,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:33,519 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:33,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 37a29921e433fcac8df396b670af8f39 in 200 msec 2024-12-02T04:26:33,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-02T04:26:33,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d95ecee29e5f13e9036a6e1711d1a0f3 in 201 msec 2024-12-02T04:26:33,521 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:33,521 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ 
ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:33,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T04:26:33,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:33,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:33,523 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241202af1e76cf6aba4de7819f6aaf9afaa424_37a29921e433fcac8df396b670af8f39, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e202412025c6f77b1e4ad4ed3bcad5051c58da438_d95ecee29e5f13e9036a6e1711d1a0f3] hfiles 2024-12-02T04:26:33,523 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241202af1e76cf6aba4de7819f6aaf9afaa424_37a29921e433fcac8df396b670af8f39 2024-12-02T04:26:33,523 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e202412025c6f77b1e4ad4ed3bcad5051c58da438_d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:26:33,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742269_1445 (size=287) 2024-12-02T04:26:33,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742269_1445 (size=287) 2024-12-02T04:26:33,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742269_1445 (size=287) 2024-12-02T04:26:33,529 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:33,529 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,529 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742270_1446 
(size=935) 2024-12-02T04:26:33,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742270_1446 (size=935) 2024-12-02T04:26:33,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742270_1446 (size=935) 2024-12-02T04:26:33,545 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:33,551 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:33,551 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-02T04:26:33,552 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:33,552 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-02T04:26:33,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 244 msec 2024-12-02T04:26:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T04:26:33,627 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-02T04:26:33,851 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0007_000001 (auth:SIMPLE) from 127.0.0.1:38034 2024-12-02T04:26:33,861 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000001/launch_container.sh] 2024-12-02T04:26:33,861 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000001/container_tokens] 2024-12-02T04:26:33,861 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0007/container_1733113348655_0007_01_000001/sysfs] 2024-12-02T04:26:35,579 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:26:41,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-02T04:26:41,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-02T04:26:43,636 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113603635 2024-12-02T04:26:43,636 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113603635, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113603635, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:43,665 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:43,665 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113603635, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113603635/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-02T04:26:43,668 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:26:43,669 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] 
at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
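The SnapshotTTLExpiredException above is the outcome this test expects: the snapshot description carries ttl=10 (seconds), the snapshot procedure finished at 04:26:33.55, and ExportSnapshot's verification step ran at 04:26:43.67, just over ten seconds later. A minimal sketch of that kind of check, assuming a simple elapsed-time comparison; the helper below is illustrative only, and the exact boundary handling in HBase's own check may differ.

/**
 * Illustrative TTL check, not the actual HBase implementation: a snapshot
 * with a positive TTL (in seconds) is treated as expired once more than
 * ttl seconds have elapsed since its creation time.
 */
public final class SnapshotTtlSketch {

  static boolean isExpired(long ttlSeconds, long creationTimeMs, long nowMs) {
    // ttl <= 0 means "never expires"; the exact boundary behaviour
    // (>= vs >, rounding) is an assumption.
    return ttlSeconds > 0 && (nowMs - creationTimeMs) > ttlSeconds * 1000L;
  }

  public static void main(String[] args) {
    long ttlSeconds = 10;        // ttl=10 in the snapshot description above
    long creationTimeMs = 0;     // snapshot completed (log: 04:26:33.55)
    long verifyTimeMs = 10_114;  // export verified ~10.1 s later (log: 04:26:43.67)
    // Prints "true": the export is refused with SnapshotTTLExpiredException.
    System.out.println(isExpired(ttlSeconds, creationTimeMs, verifyTimeMs));
  }
}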
2024-12-02T04:26:43,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-02T04:26:43,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:43,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T04:26:43,672 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113603672"}]},"ts":"1733113603672"} 2024-12-02T04:26:43,674 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-02T04:26:43,674 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-02T04:26:43,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-02T04:26:43,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, UNASSIGN}] 2024-12-02T04:26:43,676 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, UNASSIGN 2024-12-02T04:26:43,676 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, UNASSIGN 2024-12-02T04:26:43,677 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=1a7d4e8475fbf118af165732278e70ec, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:43,677 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=e157d59036ec75da7d8bb22d76140eb3, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:43,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, UNASSIGN because future has completed 2024-12-02T04:26:43,678 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:43,678 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1a7d4e8475fbf118af165732278e70ec, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:43,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, UNASSIGN because future has completed 2024-12-02T04:26:43,679 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:43,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure e157d59036ec75da7d8bb22d76140eb3, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:43,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T04:26:43,832 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:43,832 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:43,832 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing 1a7d4e8475fbf118af165732278e70ec, disabling compactions & flushes 2024-12-02T04:26:43,832 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:43,832 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. after waiting 0 ms 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
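For reference, the export attempt that raised the exception above is driven through Hadoop's ToolRunner, exactly as the stack trace shows (ToolRunner.run -> AbstractHBaseTool.run -> ExportSnapshot.doWork -> verifySnapshot). A sketch of an equivalent standalone invocation, with the snapshot name and destination taken from this log; treat the non-zero exit code on failure as an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hand the -snapshot / -copy-to arguments to the ExportSnapshot tool,
    // the same way the test invokes it. For an expired snapshot the
    // verification step fails and the tool logs
    // "Error running command-line tool", as seen above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:34539/user/jenkins/test-data/"
            + "0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113603635"
    });
    System.exit(rc); // assumed: non-zero when the export was refused
  }
}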
2024-12-02T04:26:43,833 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing e157d59036ec75da7d8bb22d76140eb3, disabling compactions & flushes 2024-12-02T04:26:43,833 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. after waiting 0 ms 2024-12-02T04:26:43,833 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:43,841 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:43,842 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:43,842 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:43,842 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec. 
2024-12-02T04:26:43,842 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for 1a7d4e8475fbf118af165732278e70ec: Waiting for close lock at 1733113603832Running coprocessor pre-close hooks at 1733113603832Disabling compacts and flushes for region at 1733113603832Disabling writes for close at 1733113603833 (+1 ms)Writing region close event to WAL at 1733113603834 (+1 ms)Running coprocessor post-close hooks at 1733113603842 (+8 ms)Closed at 1733113603842 2024-12-02T04:26:43,842 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:43,842 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3. 2024-12-02T04:26:43,842 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for e157d59036ec75da7d8bb22d76140eb3: Waiting for close lock at 1733113603833Running coprocessor pre-close hooks at 1733113603833Disabling compacts and flushes for region at 1733113603833Disabling writes for close at 1733113603833Writing region close event to WAL at 1733113603835 (+2 ms)Running coprocessor post-close hooks at 1733113603842 (+7 ms)Closed at 1733113603842 2024-12-02T04:26:43,844 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed 1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:43,844 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=1a7d4e8475fbf118af165732278e70ec, regionState=CLOSED 2024-12-02T04:26:43,844 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:43,845 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=e157d59036ec75da7d8bb22d76140eb3, regionState=CLOSED 2024-12-02T04:26:43,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1a7d4e8475fbf118af165732278e70ec, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:43,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure e157d59036ec75da7d8bb22d76140eb3, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:43,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-02T04:26:43,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure 1a7d4e8475fbf118af165732278e70ec, server=e2eaa0f11f7e,41643,1733113342088 in 169 msec 2024-12-02T04:26:43,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-02T04:26:43,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, 
hasLock=false; CloseRegionProcedure e157d59036ec75da7d8bb22d76140eb3, server=e2eaa0f11f7e,37413,1733113342217 in 168 msec 2024-12-02T04:26:43,850 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1a7d4e8475fbf118af165732278e70ec, UNASSIGN in 174 msec 2024-12-02T04:26:43,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-12-02T04:26:43,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e157d59036ec75da7d8bb22d76140eb3, UNASSIGN in 174 msec 2024-12-02T04:26:43,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-02T04:26:43,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 178 msec 2024-12-02T04:26:43,855 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113603854"}]},"ts":"1733113603854"} 2024-12-02T04:26:43,856 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-02T04:26:43,856 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-02T04:26:43,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 186 msec 2024-12-02T04:26:43,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T04:26:43,989 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T04:26:43,990 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-02T04:26:43,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:43,995 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-02T04:26:43,997 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,000 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in 
hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,002 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:44,002 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:44,004 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/recovered.edits] 2024-12-02T04:26:44,004 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/recovered.edits] 2024-12-02T04:26:44,007 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf/6365a9a6b9a740958d9c5cf5142398bc to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/cf/6365a9a6b9a740958d9c5cf5142398bc 2024-12-02T04:26:44,007 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf/3e286fb6f4b441c4835c8ba6cbc0b658 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/cf/3e286fb6f4b441c4835c8ba6cbc0b658 2024-12-02T04:26:44,010 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3/recovered.edits/9.seqid 2024-12-02T04:26:44,010 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec/recovered.edits/9.seqid 2024-12-02T04:26:44,010 DEBUG [HFileArchiver-22 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:44,010 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportExpiredSnapshot/1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:44,010 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-02T04:26:44,010 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-02T04:26:44,011 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-02T04:26:44,014 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202ad7b859537f04ec4afd95c16f7cafb94_e157d59036ec75da7d8bb22d76140eb3 2024-12-02T04:26:44,015 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412026e38cc11f8934a0caf31d27b7a1bdf1e_1a7d4e8475fbf118af165732278e70ec 2024-12-02T04:26:44,015 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-02T04:26:44,017 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,019 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-02T04:26:44,020 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 
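The DisableTableProcedure, the DeleteTableProcedure and the HFileArchiver activity above are the server-side half of the test's cleanup. A minimal sketch of the client calls that drive the same sequence, using only standard Admin methods; the existence checks are defensive additions, not something visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // A table must be disabled before it can be deleted; deletion archives
      // the region hfiles (including the mob files) instead of removing them,
      // which is what the HFileArchiver entries above show.
      if (admin.tableExists(table)) {
        if (!admin.isTableDisabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
      // Snapshots are dropped by name, matching the three deletions logged later.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}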
2024-12-02T04:26:44,021 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,021 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-02T04:26:44,021 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113604021"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:44,021 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113604021"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:44,023 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:26:44,023 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1a7d4e8475fbf118af165732278e70ec, NAME => 'testtb-testExportExpiredSnapshot,,1733113590947.1a7d4e8475fbf118af165732278e70ec.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e157d59036ec75da7d8bb22d76140eb3, NAME => 'testtb-testExportExpiredSnapshot,1,1733113590947.e157d59036ec75da7d8bb22d76140eb3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:26:44,023 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-02T04:26:44,023 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113604023"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:44,024 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-02T04:26:44,025 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 35 msec 2024-12-02T04:26:44,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,076 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T04:26:44,076 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T04:26:44,077 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T04:26:44,077 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-02T04:26:44,084 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,084 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for 
testtb-testExportExpiredSnapshot 2024-12-02T04:26:44,084 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T04:26:44,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-02T04:26:44,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-02T04:26:44,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-02T04:26:44,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-02T04:26:44,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-02T04:26:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-02T04:26:44,117 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=806 (was 817), OpenFileDescriptor=795 (was 818), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=713 (was 796), ProcessCount=11 (was 17), AvailableMemoryMB=3125 (was 2451) - AvailableMemoryMB LEAK? 
- 2024-12-02T04:26:44,117 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-02T04:26:44,131 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=806, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=713, ProcessCount=11, AvailableMemoryMB=3121 2024-12-02T04:26:44,131 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-02T04:26:44,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:26:44,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:44,134 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:26:44,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-02T04:26:44,135 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:26:44,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T04:26:44,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742271_1447 (size=448) 2024-12-02T04:26:44,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742271_1447 (size=448) 2024-12-02T04:26:44,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742271_1447 (size=448) 2024-12-02T04:26:44,142 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 910c9909767adee358a4eb402bd170a1, NAME => 'testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:44,143 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 99c47cfef64fdc9a62ecfdcadd2900a5, NAME => 'testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742273_1449 (size=73) 2024-12-02T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742272_1448 (size=73) 2024-12-02T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742272_1448 (size=73) 2024-12-02T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742272_1448 (size=73) 2024-12-02T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742273_1449 (size=73) 2024-12-02T04:26:44,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742273_1449 (size=73) 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 99c47cfef64fdc9a62ecfdcadd2900a5, disabling compactions & flushes 2024-12-02T04:26:44,151 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 
after waiting 0 ms 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,151 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:44,151 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 99c47cfef64fdc9a62ecfdcadd2900a5: Waiting for close lock at 1733113604151Disabling compacts and flushes for region at 1733113604151Disabling writes for close at 1733113604151Writing region close event to WAL at 1733113604151Closed at 1733113604151 2024-12-02T04:26:44,152 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 910c9909767adee358a4eb402bd170a1, disabling compactions & flushes 2024-12-02T04:26:44,152 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,152 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,152 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. after waiting 0 ms 2024-12-02T04:26:44,152 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,152 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 
2024-12-02T04:26:44,152 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 910c9909767adee358a4eb402bd170a1: Waiting for close lock at 1733113604151Disabling compacts and flushes for region at 1733113604151Disabling writes for close at 1733113604152 (+1 ms)Writing region close event to WAL at 1733113604152Closed at 1733113604152 2024-12-02T04:26:44,153 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:26:44,153 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733113604153"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113604153"}]},"ts":"1733113604153"} 2024-12-02T04:26:44,153 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733113604153"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113604153"}]},"ts":"1733113604153"} 2024-12-02T04:26:44,155 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:26:44,156 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:26:44,156 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113604156"}]},"ts":"1733113604156"} 2024-12-02T04:26:44,157 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-02T04:26:44,157 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:26:44,158 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:26:44,158 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:26:44,158 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:26:44,158 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:26:44,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, ASSIGN}] 2024-12-02T04:26:44,160 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, ASSIGN 2024-12-02T04:26:44,160 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, ASSIGN 2024-12-02T04:26:44,160 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:26:44,161 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:26:44,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T04:26:44,311 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
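Note: the two ASSIGN subprocedures above and the repeated "Checking to see if procedure is done pid=195" calls are the master executing, and the client polling, the CreateTableProcedure. A minimal sketch of the client-side call that drives this flow, assuming the stock HBase 2.x+ Admin API (the table name, family "cf" and the single split key "1" are taken from the log; everything else is illustrative):

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // One split key "1" yields exactly the two regions seen in the log:
          // ['', '1') and ['1', '').
          Future<Void> create =
              admin.createTableAsync(desc, new byte[][] { Bytes.toBytes("1") });
          // While this Future is pending the client keeps asking the master whether
          // the procedure has finished -- the "is procedure done pid=195" lines.
          create.get(60, TimeUnit.SECONDS);
        }
      }
    }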
2024-12-02T04:26:44,312 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=910c9909767adee358a4eb402bd170a1, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:44,312 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=99c47cfef64fdc9a62ecfdcadd2900a5, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:44,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, ASSIGN because future has completed 2024-12-02T04:26:44,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 910c9909767adee358a4eb402bd170a1, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:44,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, ASSIGN because future has completed 2024-12-02T04:26:44,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:44,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T04:26:44,474 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,474 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 910c9909767adee358a4eb402bd170a1, NAME => 'testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:26:44,475 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 
service=AccessControlService 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 99c47cfef64fdc9a62ecfdcadd2900a5, NAME => 'testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:26:44,475 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. service=AccessControlService 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:44,475 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
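Note: both region opens register the AccessControlService coprocessor before anything else, which is why the later ACL and snapshot steps can consult hbase:acl. A hedged sketch of the configuration that enables this; the property names are the standard HBase security keys, but whether this test sets them exactly this way is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecurityConf {
      public static Configuration secured() {
        Configuration conf = HBaseConfiguration.create();
        // Enable authorization checks and load the AccessController everywhere.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }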
2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,475 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:44,476 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,476 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,477 INFO [StoreOpener-910c9909767adee358a4eb402bd170a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,477 INFO [StoreOpener-99c47cfef64fdc9a62ecfdcadd2900a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,479 INFO [StoreOpener-910c9909767adee358a4eb402bd170a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 910c9909767adee358a4eb402bd170a1 columnFamilyName cf 2024-12-02T04:26:44,479 INFO [StoreOpener-99c47cfef64fdc9a62ecfdcadd2900a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99c47cfef64fdc9a62ecfdcadd2900a5 columnFamilyName cf 2024-12-02T04:26:44,480 DEBUG [StoreOpener-910c9909767adee358a4eb402bd170a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:44,480 DEBUG [StoreOpener-99c47cfef64fdc9a62ecfdcadd2900a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:44,480 INFO [StoreOpener-99c47cfef64fdc9a62ecfdcadd2900a5-1 {}] regionserver.HStore(327): Store=99c47cfef64fdc9a62ecfdcadd2900a5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:44,480 INFO [StoreOpener-910c9909767adee358a4eb402bd170a1-1 {}] regionserver.HStore(327): Store=910c9909767adee358a4eb402bd170a1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:44,481 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,481 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 99c47cfef64fdc9a62ecfdcadd2900a5 
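Note: the CompactionConfiguration dump above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, minCompactSize 128 MB, major period 604800000 ms, jitter 0.5) echoes the usual store-compaction settings. A rough sketch of the configuration keys those values would normally come from; this is an assumed mapping, not something read from this test's files:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConf {
      public static Configuration defaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);           // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);          // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);    // ratio
        conf.setLong("hbase.hstore.compaction.min.size",
            128L * 1024 * 1024);                                 // minCompactSize 128 MB
        conf.setLong("hbase.hregion.majorcompaction", 604800000L); // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter
        return conf;
      }
    }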
2024-12-02T04:26:44,482 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,483 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,484 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,484 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,486 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:44,486 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:44,486 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 99c47cfef64fdc9a62ecfdcadd2900a5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72599259, jitterRate=0.0818132609128952}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:44,486 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 910c9909767adee358a4eb402bd170a1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74124564, jitterRate=0.104542076587677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:44,486 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,486 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,486 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 99c47cfef64fdc9a62ecfdcadd2900a5: Running coprocessor pre-open hook at 1733113604476Writing region info on filesystem at 1733113604476Initializing all the Stores at 1733113604477 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1733113604477Cleaning up temporary data from old regions at 1733113604482 (+5 ms)Running coprocessor post-open hooks at 1733113604486 (+4 ms)Region opened successfully at 1733113604486 2024-12-02T04:26:44,486 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 910c9909767adee358a4eb402bd170a1: Running coprocessor pre-open hook at 1733113604475Writing region info on filesystem at 1733113604475Initializing all the Stores at 1733113604476 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113604477 (+1 ms)Cleaning up temporary data from old regions at 1733113604483 (+6 ms)Running coprocessor post-open hooks at 1733113604486 (+3 ms)Region opened successfully at 1733113604486 2024-12-02T04:26:44,487 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1., pid=198, masterSystemTime=1733113604468 2024-12-02T04:26:44,487 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5., pid=199, masterSystemTime=1733113604469 2024-12-02T04:26:44,488 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,488 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,489 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=99c47cfef64fdc9a62ecfdcadd2900a5, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:44,489 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,489 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 
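Note: the column-family schema echoed in the two open journals (VERSIONS => '1', BLOOMFILTER => 'ROW', IS_MOB => 'true', MOB_THRESHOLD => '0', KEEP_DELETED_CELLS => 'FALSE', BLOCKSIZE 64 KB) corresponds, as a sketch, to a descriptor built like the one below; it would slot into the createTableAsync call sketched earlier. Attribute values are read from the journal; the builder chain itself is illustrative:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TestTableSchema {
      public static TableDescriptor descriptor() {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                           // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)           // BLOOMFILTER => 'ROW'
            .setMobEnabled(true)                         // IS_MOB => 'true'
            .setMobThreshold(0L)                         // MOB_THRESHOLD => '0'
            .setBlocksize(64 * 1024)                     // BLOCKSIZE => '65536 B (64KB)'
            .setKeepDeletedCells(KeepDeletedCells.FALSE) // KEEP_DELETED_CELLS => 'FALSE'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
            .setColumnFamily(cf)
            .build();
      }
    }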
2024-12-02T04:26:44,489 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=910c9909767adee358a4eb402bd170a1, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:44,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:44,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 910c9909767adee358a4eb402bd170a1, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:44,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-12-02T04:26:44,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5, server=e2eaa0f11f7e,41643,1733113342088 in 174 msec 2024-12-02T04:26:44,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-12-02T04:26:44,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 910c9909767adee358a4eb402bd170a1, server=e2eaa0f11f7e,37413,1733113342217 in 176 msec 2024-12-02T04:26:44,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, ASSIGN in 333 msec 2024-12-02T04:26:44,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-12-02T04:26:44,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, ASSIGN in 334 msec 2024-12-02T04:26:44,495 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:26:44,495 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113604495"}]},"ts":"1733113604495"} 2024-12-02T04:26:44,496 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-02T04:26:44,497 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:26:44,497 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-02T04:26:44,499 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T04:26:44,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:44,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 408 msec 2024-12-02T04:26:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T04:26:44,759 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T04:26:44,759 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:44,765 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-02T04:26:44,765 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,766 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:44,769 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:44,775 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:44,779 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:44,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:26:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113604780 (current time:1733113604780). 
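Note: the snapshot request logged here ({ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is what a client produces with Admin.snapshot. A minimal sketch using the public client API; the connection setup is illustrative, the names match the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Produces the FLUSH-type request seen above; ttl is left unset (logged as 0).
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }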
2024-12-02T04:26:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-02T04:26:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23d6af0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:44,782 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:44,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:44,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:44,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c157457, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:44,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:44,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:44,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:44,783 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:44,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a57c3cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:44,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:44,784 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:44,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:44,785 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:44,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:44,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:44,786 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T04:26:44,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38737d71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:44,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:44,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:44,787 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:44,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:44,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:44,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36c3847d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:44,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:44,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:44,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:44,788 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52984, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:44,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7399b6c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:44,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:44,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:44,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
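Note: what these short-lived connections are fetching is the ACL written during CREATE_TABLE_POST_OPERATION ("jenkins: RWXCA"), so it can be copied into the snapshot description. A hedged sketch of how the same permissions are granted and read back through the AccessControlClient helper; the user and table come from the log, the rest is illustrative:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class GrantTablePerms {
      public static void main(String[] args) throws Throwable {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // RWXCA for user "jenkins" on the whole table (null family/qualifier = all).
          AccessControlClient.grant(conn, table, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
          // Read back what PermissionStorage keeps in hbase:acl.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, table.getNameAsString());
          perms.forEach(System.out::println);
        }
      }
    }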
2024-12-02T04:26:44,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:44,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:44,793 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54466, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:44,794 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:44,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:44,794 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:44,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:44,794 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:44,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T04:26:44,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:26:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:26:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-02T04:26:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T04:26:44,796 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:44,797 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:44,799 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:44,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742274_1450 (size=185) 2024-12-02T04:26:44,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742274_1450 (size=185) 2024-12-02T04:26:44,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742274_1450 (size=185) 2024-12-02T04:26:44,809 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:44,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5}] 2024-12-02T04:26:44,810 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:44,810 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:44,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T04:26:44,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-02T04:26:44,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-02T04:26:44,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:44,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 910c9909767adee358a4eb402bd170a1: 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 99c47cfef64fdc9a62ecfdcadd2900a5: 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:44,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:45,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742275_1451 (size=76) 2024-12-02T04:26:45,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742276_1452 (size=76) 2024-12-02T04:26:45,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742275_1451 (size=76) 2024-12-02T04:26:45,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742276_1452 (size=76) 2024-12-02T04:26:45,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742275_1451 (size=76) 2024-12-02T04:26:45,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:45,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-02T04:26:45,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742276_1452 (size=76) 2024-12-02T04:26:45,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 
2024-12-02T04:26:45,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-02T04:26:45,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-02T04:26:45,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-02T04:26:45,010 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,010 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,010 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1 in 202 msec 2024-12-02T04:26:45,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-12-02T04:26:45,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5 in 202 msec 2024-12-02T04:26:45,012 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:45,013 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:45,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
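Note: the remaining steps below (SNAPSHOT_CONSOLIDATE_SNAPSHOT, SNAPSHOT_VERIFIER_SNAPSHOT, SNAPSHOT_COMPLETE_SNAPSHOT) fold the per-region manifests into one file and move the snapshot out of .hbase-snapshot/.tmp. Once that completes, a client can confirm the snapshot landed; a small sketch using the standard listing call, with the name taken from the log:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          boolean found = snapshots.stream()
              .anyMatch(s -> s.getName().equals("emptySnaptb0-testEmptyExportFileSystemState"));
          System.out.println("snapshot present: " + found);
        }
      }
    }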
2024-12-02T04:26:45,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:45,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:45,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:26:45,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742277_1453 (size=68) 2024-12-02T04:26:45,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742277_1453 (size=68) 2024-12-02T04:26:45,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742277_1453 (size=68) 2024-12-02T04:26:45,020 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:45,020 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,021 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742278_1454 (size=673) 2024-12-02T04:26:45,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742278_1454 (size=673) 2024-12-02T04:26:45,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742278_1454 (size=673) 2024-12-02T04:26:45,029 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:45,033 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:45,033 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,034 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:45,034 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-02T04:26:45,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 239 msec 2024-12-02T04:26:45,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T04:26:45,118 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T04:26:45,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37413 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:45,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:45,126 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:45,128 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-02T04:26:45,128 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 
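The HRegion(8528) warnings just above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are emitted when a client writes with WAL durability turned off. A minimal sketch of such a write, assuming the standard HBase client API; the table and family names are taken from the log, the row key and value are illustrative only:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the "with WAL disabled. Data may be lost..." warning above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}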
2024-12-02T04:26:45,128 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:45,130 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:45,133 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:45,138 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T04:26:45,140 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:26:45,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113605140 (current time:1733113605140). 2024-12-02T04:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-02T04:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8525e37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:45,142 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:45,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:45,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:45,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@147850d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:45,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:45,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:45,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:45,143 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:45,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29d15c61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:45,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:45,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:45,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:45,145 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52396, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:45,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 
2024-12-02T04:26:45,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:45,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:45,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:45,146 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:45,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3327ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:45,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:45,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:45,148 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:45,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:45,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:45,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cc2c73e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:45,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:45,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:45,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:45,149 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:45,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f3a33f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:45,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:45,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:45,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:45,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:45,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:45,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:45,153 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:45,154 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209. 
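The lookup of the 'hbase:acl' region above is the master reading the table's access-control entries so they can be attached to the snapshot description (see the writeAclToSnapshotDescription frames in the next call stack). A minimal sketch of how such a table-level grant is typically issued, assuming the AccessController coprocessor is enabled; this is not part of the test output itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant R/W/X/C/A on the whole table (family and qualifier left null),
      // which is what later appears in the log as "kv [jenkins: RWXCA]".
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}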
2024-12-02T04:26:45,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:45,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:45,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:45,154 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:45,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T04:26:45,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
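The "snapshot request for:{ ss=snaptb0-... type=FLUSH ttl=0 }" entry above is the server side of a client Admin.snapshot() call; the SnapshotProcedure states that follow (SNAPSHOT_PREPARE through SNAPSHOT_COMPLETE_SNAPSHOT) are the master executing it. A minimal sketch of the client-side call, assuming the standard HBase Admin API, with the snapshot and table names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: online regions are flushed first, then per-region
      // manifests are written and consolidated, as the procedure log below shows.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH));
    }
  }
}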
2024-12-02T04:26:45,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T04:26:45,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-02T04:26:45,157 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:45,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T04:26:45,158 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:45,159 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:45,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742279_1455 (size=180) 2024-12-02T04:26:45,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742279_1455 (size=180) 2024-12-02T04:26:45,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742279_1455 (size=180) 2024-12-02T04:26:45,165 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:45,165 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5}] 2024-12-02T04:26:45,166 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,166 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T04:26:45,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-02T04:26:45,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37413 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-02T04:26:45,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:45,318 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 99c47cfef64fdc9a62ecfdcadd2900a5 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T04:26:45,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:45,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 910c9909767adee358a4eb402bd170a1 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T04:26:45,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1 is 71, key is 0aea8b8c6b8ef8d122731bf7cf546986/cf:q/1733113605122/Put/seqid=0 2024-12-02T04:26:45,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5 is 71, key is 109afd8c7f71fb88d9caa7aa09982117/cf:q/1733113605125/Put/seqid=0 2024-12-02T04:26:45,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742280_1456 (size=5172) 2024-12-02T04:26:45,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742281_1457 (size=8101) 2024-12-02T04:26:45,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742281_1457 (size=8101) 2024-12-02T04:26:45,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742280_1456 (size=5172) 2024-12-02T04:26:45,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742280_1456 (size=5172) 
2024-12-02T04:26:45,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742281_1457 (size=8101) 2024-12-02T04:26:45,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:45,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:45,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/.tmp/cf/7105d151e48e4039a0083b0eb67f3f06, store: [table=testtb-testEmptyExportFileSystemState family=cf region=99c47cfef64fdc9a62ecfdcadd2900a5] 2024-12-02T04:26:45,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/.tmp/cf/0da0b558631b4807b6c084d8714c7c21, store: [table=testtb-testEmptyExportFileSystemState family=cf region=910c9909767adee358a4eb402bd170a1] 2024-12-02T04:26:45,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/.tmp/cf/7105d151e48e4039a0083b0eb67f3f06 is 214, key is 1ef8eb1b2d43aaa37bfe2a7167d05c0ac/cf:q/1733113605125/Put/seqid=0 
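The flushes above go through HMobStore/DefaultMobStoreFlusher and land files under /mobdir because the 'cf' family of this table is MOB-enabled. A minimal sketch of creating such a table, assuming the standard Admin/TableDescriptorBuilder API; the MOB threshold and the single split point are illustrative (the split point matches the two regions seen in the log):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cells in a MOB-enabled family above the threshold are stored as separate
      // MOB files under /mobdir, which snapshots must also reference.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0L)   // illustrative threshold
                  .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") });  // one split point -> two regions
    }
  }
}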
2024-12-02T04:26:45,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/.tmp/cf/0da0b558631b4807b6c084d8714c7c21 is 214, key is 097509f1cae073ed8fcb8c51f439e631a/cf:q/1733113605122/Put/seqid=0 2024-12-02T04:26:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742282_1458 (size=6148) 2024-12-02T04:26:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742283_1459 (size=15027) 2024-12-02T04:26:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742282_1458 (size=6148) 2024-12-02T04:26:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742283_1459 (size=15027) 2024-12-02T04:26:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742282_1458 (size=6148) 2024-12-02T04:26:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742283_1459 (size=15027) 2024-12-02T04:26:45,350 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/.tmp/cf/0da0b558631b4807b6c084d8714c7c21 2024-12-02T04:26:45,350 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/.tmp/cf/7105d151e48e4039a0083b0eb67f3f06 2024-12-02T04:26:45,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/.tmp/cf/0da0b558631b4807b6c084d8714c7c21 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf/0da0b558631b4807b6c084d8714c7c21 2024-12-02T04:26:45,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/.tmp/cf/7105d151e48e4039a0083b0eb67f3f06 as 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf/7105d151e48e4039a0083b0eb67f3f06 2024-12-02T04:26:45,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf/0da0b558631b4807b6c084d8714c7c21, entries=4, sequenceid=6, filesize=6.0 K 2024-12-02T04:26:45,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf/7105d151e48e4039a0083b0eb67f3f06, entries=46, sequenceid=6, filesize=14.7 K 2024-12-02T04:26:45,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 910c9909767adee358a4eb402bd170a1 in 39ms, sequenceid=6, compaction requested=false 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-02T04:26:45,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 99c47cfef64fdc9a62ecfdcadd2900a5 in 40ms, sequenceid=6, compaction requested=false 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 910c9909767adee358a4eb402bd170a1: 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 99c47cfef64fdc9a62ecfdcadd2900a5: 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf/0da0b558631b4807b6c084d8714c7c21] hfiles 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf/0da0b558631b4807b6c084d8714c7c21 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf/7105d151e48e4039a0083b0eb67f3f06] hfiles 2024-12-02T04:26:45,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf/7105d151e48e4039a0083b0eb67f3f06 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742285_1461 (size=115) 2024-12-02T04:26:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742284_1460 (size=115) 2024-12-02T04:26:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742285_1461 (size=115) 2024-12-02T04:26:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742284_1460 (size=115) 2024-12-02T04:26:45,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 
2024-12-02T04:26:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742285_1461 (size=115) 2024-12-02T04:26:45,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-02T04:26:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742284_1460 (size=115) 2024-12-02T04:26:45,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:45,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-02T04:26:45,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-02T04:26:45,364 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-02T04:26:45,364 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,364 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5 in 200 msec 2024-12-02T04:26:45,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=204, resume processing ppid=203 2024-12-02T04:26:45,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 910c9909767adee358a4eb402bd170a1 in 200 msec 2024-12-02T04:26:45,366 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:45,367 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 
2024-12-02T04:26:45,367 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T04:26:45,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:45,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:45,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1] hfiles 2024-12-02T04:26:45,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:45,368 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:45,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742286_1462 (size=299) 2024-12-02T04:26:45,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742286_1462 (size=299) 2024-12-02T04:26:45,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742286_1462 (size=299) 2024-12-02T04:26:45,375 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:45,375 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,375 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742287_1463 (size=983) 2024-12-02T04:26:45,382 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742287_1463 (size=983) 2024-12-02T04:26:45,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742287_1463 (size=983) 2024-12-02T04:26:45,385 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:45,392 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:45,392 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,394 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:45,394 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-02T04:26:45,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 239 msec 2024-12-02T04:26:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T04:26:45,478 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T04:26:45,478 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478 2024-12-02T04:26:45,479 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:45,507 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:45,507 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,509 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:26:45,512 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:45,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742288_1464 (size=185) 2024-12-02T04:26:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742289_1465 (size=673) 2024-12-02T04:26:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742289_1465 (size=673) 2024-12-02T04:26:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742289_1465 (size=673) 2024-12-02T04:26:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742288_1464 (size=185) 2024-12-02T04:26:45,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742288_1464 (size=185) 2024-12-02T04:26:45,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:45,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:45,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-257813034500594133.jar 2024-12-02T04:26:46,363 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-14925343534753791248.jar 2024-12-02T04:26:46,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:46,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:26:46,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:26:46,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 
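The ExportSnapshot lines above (verifying the source snapshot, copying its manifest into the target .tmp directory) and the long run of TableMapReduceUtil "For class ..., using jar ..." entries are the setup phase of the export MapReduce job: dependency jars are resolved so they can be shipped with the job. A minimal sketch of an equivalent standalone invocation of the ExportSnapshot tool, using its documented -snapshot/-copy-to flags; the destination URI here is illustrative, whereas the test uses a per-run export-test directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent of: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //   -snapshot <name> -copy-to <target filesystem URI>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://localhost:34539/user/jenkins/export-dest"  // illustrative target
    });
    System.exit(rc);
  }
}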
2024-12-02T04:26:46,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:26:46,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:26:46,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:26:46,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:26:46,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:26:46,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:26:46,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:26:46,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:46,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:46,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742290_1466 (size=131440) 2024-12-02T04:26:46,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742290_1466 (size=131440) 2024-12-02T04:26:46,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742290_1466 (size=131440) 2024-12-02T04:26:46,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742291_1467 (size=440957) 2024-12-02T04:26:46,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742291_1467 (size=440957) 2024-12-02T04:26:46,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742291_1467 (size=440957) 2024-12-02T04:26:46,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742292_1468 (size=4188619) 2024-12-02T04:26:46,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742292_1468 (size=4188619) 2024-12-02T04:26:46,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742292_1468 (size=4188619) 2024-12-02T04:26:46,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742293_1469 (size=1323991) 2024-12-02T04:26:46,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742293_1469 (size=1323991) 2024-12-02T04:26:46,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742293_1469 (size=1323991) 2024-12-02T04:26:46,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742294_1470 (size=903927) 2024-12-02T04:26:46,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to 
blk_1073742294_1470 (size=903927) 2024-12-02T04:26:46,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742294_1470 (size=903927) 2024-12-02T04:26:46,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742295_1471 (size=8360083) 2024-12-02T04:26:46,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742295_1471 (size=8360083) 2024-12-02T04:26:46,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742295_1471 (size=8360083) 2024-12-02T04:26:46,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742296_1472 (size=1877034) 2024-12-02T04:26:46,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742296_1472 (size=1877034) 2024-12-02T04:26:46,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742296_1472 (size=1877034) 2024-12-02T04:26:46,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742297_1473 (size=77835) 2024-12-02T04:26:46,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742297_1473 (size=77835) 2024-12-02T04:26:46,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742297_1473 (size=77835) 2024-12-02T04:26:46,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742298_1474 (size=30949) 2024-12-02T04:26:46,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742298_1474 (size=30949) 2024-12-02T04:26:46,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742298_1474 (size=30949) 2024-12-02T04:26:46,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742299_1475 (size=1597213) 2024-12-02T04:26:46,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742299_1475 (size=1597213) 2024-12-02T04:26:46,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742299_1475 (size=1597213) 2024-12-02T04:26:47,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742300_1476 (size=4695811) 2024-12-02T04:26:47,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742300_1476 (size=4695811) 2024-12-02T04:26:47,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742300_1476 (size=4695811) 2024-12-02T04:26:47,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 
is added to blk_1073742301_1477 (size=232957) 2024-12-02T04:26:47,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742301_1477 (size=232957) 2024-12-02T04:26:47,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742301_1477 (size=232957) 2024-12-02T04:26:47,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742302_1478 (size=127628) 2024-12-02T04:26:47,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742302_1478 (size=127628) 2024-12-02T04:26:47,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742302_1478 (size=127628) 2024-12-02T04:26:47,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742303_1479 (size=20406) 2024-12-02T04:26:47,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742303_1479 (size=20406) 2024-12-02T04:26:47,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742303_1479 (size=20406) 2024-12-02T04:26:47,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742304_1480 (size=5175431) 2024-12-02T04:26:47,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742304_1480 (size=5175431) 2024-12-02T04:26:47,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742304_1480 (size=5175431) 2024-12-02T04:26:47,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742305_1481 (size=217634) 2024-12-02T04:26:47,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742305_1481 (size=217634) 2024-12-02T04:26:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742305_1481 (size=217634) 2024-12-02T04:26:47,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742306_1482 (size=1832290) 2024-12-02T04:26:47,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742306_1482 (size=1832290) 2024-12-02T04:26:47,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742306_1482 (size=1832290) 2024-12-02T04:26:47,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742307_1483 (size=322274) 2024-12-02T04:26:47,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742307_1483 (size=322274) 2024-12-02T04:26:47,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40371 is added to blk_1073742307_1483 (size=322274) 2024-12-02T04:26:47,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742308_1484 (size=503880) 2024-12-02T04:26:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742308_1484 (size=503880) 2024-12-02T04:26:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742308_1484 (size=503880) 2024-12-02T04:26:47,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742309_1485 (size=29229) 2024-12-02T04:26:47,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742309_1485 (size=29229) 2024-12-02T04:26:47,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742309_1485 (size=29229) 2024-12-02T04:26:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742310_1486 (size=24096) 2024-12-02T04:26:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742310_1486 (size=24096) 2024-12-02T04:26:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742310_1486 (size=24096) 2024-12-02T04:26:47,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742311_1487 (size=6424732) 2024-12-02T04:26:47,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742311_1487 (size=6424732) 2024-12-02T04:26:47,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742311_1487 (size=6424732) 2024-12-02T04:26:47,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742312_1488 (size=111872) 2024-12-02T04:26:47,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742312_1488 (size=111872) 2024-12-02T04:26:47,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742312_1488 (size=111872) 2024-12-02T04:26:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742313_1489 (size=45609) 2024-12-02T04:26:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742313_1489 (size=45609) 2024-12-02T04:26:47,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742313_1489 (size=45609) 2024-12-02T04:26:47,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742314_1490 (size=136454) 2024-12-02T04:26:47,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38235 is added to blk_1073742314_1490 (size=136454) 2024-12-02T04:26:47,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742314_1490 (size=136454) 2024-12-02T04:26:47,124 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T04:26:47,126 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-02T04:26:47,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742315_1491 (size=7) 2024-12-02T04:26:47,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742315_1491 (size=7) 2024-12-02T04:26:47,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742315_1491 (size=7) 2024-12-02T04:26:47,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742316_1492 (size=10) 2024-12-02T04:26:47,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742316_1492 (size=10) 2024-12-02T04:26:47,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742316_1492 (size=10) 2024-12-02T04:26:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742317_1493 (size=303899) 2024-12-02T04:26:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742317_1493 (size=303899) 2024-12-02T04:26:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742317_1493 (size=303899) 2024-12-02T04:26:47,179 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:26:47,179 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:26:47,661 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0008_000001 (auth:SIMPLE) from 127.0.0.1:55704 2024-12-02T04:26:49,369 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:26:50,563 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
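The export itself is driven by the ExportSnapshot tool, which the log shows loading the snapshot's hfile list and then submitting a MapReduce job (hence the JobResourceUploader and capacity-scheduler messages). A hedged sketch of invoking the same tool programmatically through ToolRunner; the destination URI and mapper count are placeholders, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ExportSnapshot implements the Hadoop Tool interface, so it can be run
        // through ToolRunner with its usual -snapshot / -copy-to options.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://backup-cluster:8020/hbase-export", // placeholder destination
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }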
2024-12-02T04:26:51,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-02T04:26:51,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-02T04:26:51,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-02T04:26:52,665 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0008_000001 (auth:SIMPLE) from 127.0.0.1:50410 2024-12-02T04:26:52,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742318_1494 (size=349573) 2024-12-02T04:26:52,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742318_1494 (size=349573) 2024-12-02T04:26:52,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742318_1494 (size=349573) 2024-12-02T04:26:53,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742319_1495 (size=8568) 2024-12-02T04:26:53,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742319_1495 (size=8568) 2024-12-02T04:26:53,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742319_1495 (size=8568) 2024-12-02T04:26:53,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742320_1496 (size=460) 2024-12-02T04:26:53,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742320_1496 (size=460) 2024-12-02T04:26:53,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742320_1496 (size=460) 2024-12-02T04:26:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742321_1497 (size=8568) 2024-12-02T04:26:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742321_1497 (size=8568) 2024-12-02T04:26:53,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742321_1497 (size=8568) 2024-12-02T04:26:53,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742322_1498 (size=349573) 2024-12-02T04:26:53,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742322_1498 (size=349573) 2024-12-02T04:26:53,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742322_1498 (size=349573) 
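The entries that follow (TestExportSnapshot(495)/(500)) verify the export by recursively listing both the source and the exported .hbase-snapshot directories and logging each file (.snapshotinfo, data.manifest). A small sketch of the same kind of recursive listing with the plain FileSystem API, assuming a placeholder namenode and snapshot path rather than the test's temporary test-data directories:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListSnapshotFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode and snapshot directory.
        Path root = new Path("hdfs://localhost:8020/hbase/.hbase-snapshot/example-snapshot");
        FileSystem fs = FileSystem.get(URI.create(root.toString()), conf);
        // listFiles(path, true) walks the tree recursively, mirroring the
        // per-file lines the verification step prints.
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }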
2024-12-02T04:26:55,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:26:55,482 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T04:26:55,486 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:55,486 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:26:55,486 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:26:55,486 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:55,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-02T04:26:55,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-02T04:26:55,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:55,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-02T04:26:55,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113605478/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-02T04:26:55,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T04:26:55,494 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113615494"}]},"ts":"1733113615494"} 2024-12-02T04:26:55,496 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-02T04:26:55,496 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-02T04:26:55,496 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-02T04:26:55,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, UNASSIGN}] 2024-12-02T04:26:55,498 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, UNASSIGN 2024-12-02T04:26:55,498 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, UNASSIGN 2024-12-02T04:26:55,499 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=99c47cfef64fdc9a62ecfdcadd2900a5, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:55,499 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=910c9909767adee358a4eb402bd170a1, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:26:55,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, UNASSIGN because future has completed 2024-12-02T04:26:55,501 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:55,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 910c9909767adee358a4eb402bd170a1, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:26:55,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, UNASSIGN because future has completed 2024-12-02T04:26:55,501 DEBUG [PEWorker-3 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:26:55,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:55,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T04:26:55,654 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:55,654 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:55,654 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 910c9909767adee358a4eb402bd170a1, disabling compactions & flushes 2024-12-02T04:26:55,655 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:55,655 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. after waiting 0 ms 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 99c47cfef64fdc9a62ecfdcadd2900a5, disabling compactions & flushes 2024-12-02T04:26:55,655 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 
2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. after waiting 0 ms 2024-12-02T04:26:55,655 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:55,661 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:55,662 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:26:55,662 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:55,662 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:26:55,662 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5. 2024-12-02T04:26:55,662 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1. 
2024-12-02T04:26:55,662 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 910c9909767adee358a4eb402bd170a1: Waiting for close lock at 1733113615654Running coprocessor pre-close hooks at 1733113615654Disabling compacts and flushes for region at 1733113615654Disabling writes for close at 1733113615655 (+1 ms)Writing region close event to WAL at 1733113615656 (+1 ms)Running coprocessor post-close hooks at 1733113615662 (+6 ms)Closed at 1733113615662 2024-12-02T04:26:55,662 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 99c47cfef64fdc9a62ecfdcadd2900a5: Waiting for close lock at 1733113615655Running coprocessor pre-close hooks at 1733113615655Disabling compacts and flushes for region at 1733113615655Disabling writes for close at 1733113615655Writing region close event to WAL at 1733113615657 (+2 ms)Running coprocessor post-close hooks at 1733113615662 (+5 ms)Closed at 1733113615662 2024-12-02T04:26:55,664 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:55,665 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=910c9909767adee358a4eb402bd170a1, regionState=CLOSED 2024-12-02T04:26:55,665 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:55,665 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=99c47cfef64fdc9a62ecfdcadd2900a5, regionState=CLOSED 2024-12-02T04:26:55,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 910c9909767adee358a4eb402bd170a1, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:26:55,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:55,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-02T04:26:55,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 910c9909767adee358a4eb402bd170a1, server=e2eaa0f11f7e,37413,1733113342217 in 165 msec 2024-12-02T04:26:55,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-02T04:26:55,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=910c9909767adee358a4eb402bd170a1, UNASSIGN in 171 msec 2024-12-02T04:26:55,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 99c47cfef64fdc9a62ecfdcadd2900a5, server=e2eaa0f11f7e,41643,1733113342088 in 166 msec 2024-12-02T04:26:55,670 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-02T04:26:55,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=99c47cfef64fdc9a62ecfdcadd2900a5, UNASSIGN in 172 msec 2024-12-02T04:26:55,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-02T04:26:55,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 174 msec 2024-12-02T04:26:55,673 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113615673"}]},"ts":"1733113615673"} 2024-12-02T04:26:55,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-02T04:26:55,674 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-02T04:26:55,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 182 msec 2024-12-02T04:26:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T04:26:55,809 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T04:26:55,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,815 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,816 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,819 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,820 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:55,820 DEBUG [HFileArchiver-23 {}] 
backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:55,822 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/recovered.edits] 2024-12-02T04:26:55,822 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/recovered.edits] 2024-12-02T04:26:55,826 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf/0da0b558631b4807b6c084d8714c7c21 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/cf/0da0b558631b4807b6c084d8714c7c21 2024-12-02T04:26:55,826 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf/7105d151e48e4039a0083b0eb67f3f06 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/cf/7105d151e48e4039a0083b0eb67f3f06 2024-12-02T04:26:55,828 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5/recovered.edits/9.seqid 2024-12-02T04:26:55,828 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1/recovered.edits/9.seqid 2024-12-02T04:26:55,829 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:55,829 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testEmptyExportFileSystemState/910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:55,829 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-02T04:26:55,829 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-02T04:26:55,830 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-02T04:26:55,833 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241202c85813ef4ef141618f5168ac45164c59_99c47cfef64fdc9a62ecfdcadd2900a5 2024-12-02T04:26:55,834 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202a0d74810d3834d14b1add21210a9b069_910c9909767adee358a4eb402bd170a1 2024-12-02T04:26:55,834 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-02T04:26:55,836 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,838 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-02T04:26:55,841 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-02T04:26:55,842 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,842 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
2024-12-02T04:26:55,842 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113615842"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:55,842 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113615842"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:55,844 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:26:55,844 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 910c9909767adee358a4eb402bd170a1, NAME => 'testtb-testEmptyExportFileSystemState,,1733113604132.910c9909767adee358a4eb402bd170a1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 99c47cfef64fdc9a62ecfdcadd2900a5, NAME => 'testtb-testEmptyExportFileSystemState,1,1733113604132.99c47cfef64fdc9a62ecfdcadd2900a5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:26:55,844 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-02T04:26:55,845 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113615844"}]},"ts":"9223372036854775807"} 2024-12-02T04:26:55,846 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-02T04:26:55,847 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 36 msec 2024-12-02T04:26:55,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,892 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T04:26:55,892 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T04:26:55,892 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T04:26:55,892 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T04:26:55,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:55,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:55,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:55,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:55,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 
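The disable/delete sequence above, and the snapshot deletions logged immediately below, correspond to the test's cleanup phase. A minimal sketch of that cleanup through the client Admin API, assuming a standard connection (the quorum setting is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestCleanup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Mirrors the DisableTableProcedure / DeleteTableProcedure entries above:
          admin.disableTable(table);
          admin.deleteTable(table);
          // Mirrors the "delete name: ..." snapshot requests that follow:
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }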
2024-12-02T04:26:55,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-02T04:26:55,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:55,899 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-02T04:26:55,899 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T04:26:55,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-02T04:26:55,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:55,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-02T04:26:55,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-02T04:26:55,927 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=820 (was 806) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:48716 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2026492825_1 at /127.0.0.1:53696 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:40117 from appattempt_1733113348655_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39391 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 147659) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2026492825_1 at /127.0.0.1:38534 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:38558 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39391 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7337 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:49656 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:40515 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=823 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=643 (was 713), ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=2339 (was 3121) 2024-12-02T04:26:55,928 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-12-02T04:26:55,946 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=820, OpenFileDescriptor=823, MaxFileDescriptor=1048576, SystemLoadAverage=643, ProcessCount=17, AvailableMemoryMB=2339 2024-12-02T04:26:55,946 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-12-02T04:26:55,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:26:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:26:55,949 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:26:55,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-02T04:26:55,949 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:26:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T04:26:55,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742323_1499 (size=440) 2024-12-02T04:26:55,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742323_1499 (size=440) 2024-12-02T04:26:55,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742323_1499 (size=440) 2024-12-02T04:26:55,958 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ecde63e4e4de7e6bec0f1bffc7ef31bd, NAME => 'testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:55,958 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 428273f3ac5f77d1702cb19f79dac3a4, NAME => 'testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742324_1500 (size=65) 2024-12-02T04:26:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742324_1500 (size=65) 2024-12-02T04:26:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742325_1501 (size=65) 2024-12-02T04:26:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742325_1501 (size=65) 2024-12-02T04:26:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742324_1500 (size=65) 2024-12-02T04:26:55,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742325_1501 (size=65) 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 428273f3ac5f77d1702cb19f79dac3a4, disabling compactions & flushes 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing ecde63e4e4de7e6bec0f1bffc7ef31bd, disabling compactions & flushes 2024-12-02T04:26:55,964 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
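The create request above fixes the table shape: a single 'cf' family with IS_MOB => 'true' and MOB_THRESHOLD => '0', and two regions split at rowkey '1'. A hedged sketch of an equivalent request through the public Admin API; only the table name, family name and split key are taken from the log, and the rest is a plain illustration rather than the test's own code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // 'cf' with MOB enabled and threshold 0, matching the descriptor printed in the request.
      TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // Split key '1' yields the two regions ('' .. '1' and '1' .. '') created above.
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(tdb.build(), splits);
    }
  }
}
```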
2024-12-02T04:26:55,964 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. after waiting 0 ms 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. after waiting 0 ms 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:55,964 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:55,964 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for ecde63e4e4de7e6bec0f1bffc7ef31bd: Waiting for close lock at 1733113615964Disabling compacts and flushes for region at 1733113615964Disabling writes for close at 1733113615964Writing region close event to WAL at 1733113615964Closed at 1733113615964 2024-12-02T04:26:55,964 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 428273f3ac5f77d1702cb19f79dac3a4: Waiting for close lock at 1733113615964Disabling compacts and flushes for region at 1733113615964Disabling writes for close at 1733113615964Writing region close event to WAL at 1733113615964Closed at 1733113615964 2024-12-02T04:26:55,965 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:26:55,965 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733113615965"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113615965"}]},"ts":"1733113615965"} 2024-12-02T04:26:55,965 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733113615965"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113615965"}]},"ts":"1733113615965"} 2024-12-02T04:26:55,967 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
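The two MetaTableAccessor Puts above add one hbase:meta row per new region, each carrying info:regioninfo and info:state. Those rows can be read back with the ordinary client API; a small sketch (illustrative only, not the master's internal code path, and the prefix-scan shortcut is my assumption):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowsSketch {
  // Prints the hbase:meta rows whose keys start with "<table>,", i.e. one row per region.
  static void dump(Connection conn, String tableName) throws Exception {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(
             new Scan().setRowPrefixFilter(Bytes.toBytes(tableName + ",")))) {
      for (Result r : scanner) {
        byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
        System.out.println(Bytes.toString(r.getRow()) + " state="
            + (state == null ? "?" : Bytes.toString(state)));
      }
    }
  }
}
```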
2024-12-02T04:26:55,968 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:26:55,968 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113615968"}]},"ts":"1733113615968"} 2024-12-02T04:26:55,969 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-02T04:26:55,969 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:26:55,970 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:26:55,970 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:26:55,970 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:26:55,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:26:55,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, ASSIGN}] 2024-12-02T04:26:55,972 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, ASSIGN 2024-12-02T04:26:55,972 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, ASSIGN 2024-12-02T04:26:55,972 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:26:55,972 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:26:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T04:26:56,123 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T04:26:56,124 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:56,124 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=ecde63e4e4de7e6bec0f1bffc7ef31bd, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:26:56,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, ASSIGN because future has completed 2024-12-02T04:26:56,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:26:56,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, ASSIGN because future has completed 2024-12-02T04:26:56,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:26:56,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T04:26:56,287 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:56,287 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 428273f3ac5f77d1702cb19f79dac3a4, NAME => 'testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:26:56,288 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. service=AccessControlService 2024-12-02T04:26:56,288 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
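The repeated "Checking to see if procedure is done pid=213" calls above are the client polling the master for the CreateTableProcedure result while the two assignment subprocedures run. From the client's side this corresponds to waiting on the future returned by the asynchronous admin call; a hedged sketch (the timeout and names are illustrative):

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableAndWaitSketch {
  // Submits the create and blocks until the master reports the procedure as finished.
  static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
    byte[][] splits = new byte[][] { Bytes.toBytes("1") };
    Future<Void> done = admin.createTableAsync(td, splits);
    // While this get() is pending, the client keeps asking the master whether the procedure
    // has completed, which is what MasterRpcServices(1377) is logging above.
    done.get(60, TimeUnit.SECONDS);
  }
}
```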
2024-12-02T04:26:56,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:56,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,289 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,291 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:56,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => ecde63e4e4de7e6bec0f1bffc7ef31bd, NAME => 'testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:26:56,292 INFO [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,292 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. service=AccessControlService 2024-12-02T04:26:56,292 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
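The cacheConfig line above (cacheDataOnRead=true, with write-side caching, eviction on close and prefetch all disabled) is derived from the column family's settings. The same knobs are exposed on the descriptor builder; a brief sketch that sets them explicitly to the values printed above, purely as an illustration:

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CacheConfigSketch {
  static ColumnFamilyDescriptor cfWithExplicitCaching() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setBlockCacheEnabled(true)      // BLOCKCACHE => 'true'  -> cacheDataOnRead=true
        .setCacheDataOnWrite(false)      // cacheDataOnWrite=false
        .setCacheIndexesOnWrite(false)   // cacheIndexesOnWrite=false
        .setCacheBloomsOnWrite(false)    // cacheBloomsOnWrite=false
        .setEvictBlocksOnClose(false)    // cacheEvictOnClose=false
        .setPrefetchBlocksOnOpen(false)  // prefetchOnOpen=false
        .build();
  }
}
```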
2024-12-02T04:26:56,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:26:56,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,293 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,294 INFO [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 428273f3ac5f77d1702cb19f79dac3a4 columnFamilyName cf 2024-12-02T04:26:56,294 INFO [StoreOpener-ecde63e4e4de7e6bec0f1bffc7ef31bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,295 DEBUG [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:56,295 INFO [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] regionserver.HStore(327): Store=428273f3ac5f77d1702cb19f79dac3a4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:56,295 INFO [StoreOpener-ecde63e4e4de7e6bec0f1bffc7ef31bd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ecde63e4e4de7e6bec0f1bffc7ef31bd columnFamilyName cf 2024-12-02T04:26:56,296 DEBUG 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,296 DEBUG [StoreOpener-ecde63e4e4de7e6bec0f1bffc7ef31bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:56,296 INFO [StoreOpener-ecde63e4e4de7e6bec0f1bffc7ef31bd-1 {}] regionserver.HStore(327): Store=ecde63e4e4de7e6bec0f1bffc7ef31bd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:26:56,296 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,296 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,296 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,297 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,297 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,297 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,297 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,297 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,297 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,298 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,298 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,299 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 
{event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:56,299 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened ecde63e4e4de7e6bec0f1bffc7ef31bd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65966897, jitterRate=-0.017016634345054626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:56,299 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:26:56,300 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,300 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 428273f3ac5f77d1702cb19f79dac3a4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72512672, jitterRate=0.08052301406860352}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:26:56,300 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,300 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for ecde63e4e4de7e6bec0f1bffc7ef31bd: Running coprocessor pre-open hook at 1733113616293Writing region info on filesystem at 1733113616293Initializing all the Stores at 1733113616294 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113616294Cleaning up temporary data from old regions at 1733113616297 (+3 ms)Running coprocessor post-open hooks at 1733113616300 (+3 ms)Region opened successfully at 1733113616300 2024-12-02T04:26:56,300 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 428273f3ac5f77d1702cb19f79dac3a4: Running coprocessor pre-open hook at 1733113616289Writing region info on filesystem at 1733113616289Initializing all the Stores at 1733113616291 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113616291Cleaning up temporary data from old regions at 1733113616297 (+6 ms)Running coprocessor post-open hooks at 1733113616300 (+3 ms)Region opened successfully at 1733113616300 2024-12-02T04:26:56,300 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4., pid=216, masterSystemTime=1733113616282 2024-12-02T04:26:56,300 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd., pid=217, masterSystemTime=1733113616287 2024-12-02T04:26:56,302 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:56,302 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:56,302 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=ecde63e4e4de7e6bec0f1bffc7ef31bd, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:26:56,302 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:56,302 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
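Both regions now report "Opened" and their post-open deploy tasks have run, so each has a concrete hosting region server. A client can see the same placement through RegionLocator; a short, hedged sketch using the standard client API:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationsSketch {
  static void show(Connection conn, String table) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(table))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Encoded region name plus its server, matching the OPENING -> OPEN transitions
        // that RegionStateStore records in hbase:meta above.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```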
2024-12-02T04:26:56,303 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:26:56,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:26:56,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:26:56,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-12-02T04:26:56,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd, server=e2eaa0f11f7e,34291,1733113342258 in 174 msec 2024-12-02T04:26:56,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-12-02T04:26:56,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, ASSIGN in 335 msec 2024-12-02T04:26:56,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,41643,1733113342088 in 177 msec 2024-12-02T04:26:56,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-12-02T04:26:56,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, ASSIGN in 336 msec 2024-12-02T04:26:56,308 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:26:56,309 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113616308"}]},"ts":"1733113616308"} 2024-12-02T04:26:56,310 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-02T04:26:56,310 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:26:56,310 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-02T04:26:56,312 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
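PermissionStorage writing "jenkins: RWXCA" is the post-create step that records the creator's full rights for the new table in hbase:acl, and the region server immediately reads that entry back. The same kind of entry can be produced from a client with AccessControlClient; a hedged sketch (the user and table names come from the log, the helper name is invented):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantOwnerPermsSketch {
  // Grants READ/WRITE/EXEC/CREATE/ADMIN on the whole table (null family and qualifier),
  // which is the RWXCA entry recorded in hbase:acl above.
  static void grantAll(Connection conn, String user, String table) throws Throwable {
    AccessControlClient.grant(conn, TableName.valueOf(table), user, null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```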
2024-12-02T04:26:56,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:56,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:56,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:56,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T04:26:56,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 409 msec 2024-12-02T04:26:56,578 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T04:26:56,579 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T04:26:56,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,583 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-02T04:26:56,583 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:56,584 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:56,587 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,593 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,598 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T04:26:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113616601 (current time:1733113616601). 
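The request logged above asks for a FLUSH-type snapshot named emptySnaptb0-testExportWithChecksum of the still-empty table. From a client the snapshot, like the "Deleting snapshot" calls seen earlier for the previous test's snapshots, goes through Admin; a hedged sketch using the names from the log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  static void takeAndLaterDelete(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    // FLUSH-type snapshot, matching "type=FLUSH" in the request above.
    admin.snapshot("emptySnaptb0-testExportWithChecksum", table, SnapshotType.FLUSH);
    // Cleanup mirrors the earlier "Deleting snapshot" lines.
    admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
  }
}
```

The export half of testExportWithChecksum is normally driven by the org.apache.hadoop.hbase.snapshot.ExportSnapshot MapReduce tool, which verifies file checksums while copying unless that verification is explicitly disabled.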
2024-12-02T04:26:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-02T04:26:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:56,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35ead083, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:56,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:56,604 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:56,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:56,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:56,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56554b1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:56,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:56,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,606 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:56,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec841cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:56,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:56,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:56,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55180, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:56,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,613 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
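The call stack above shows the master opening and closing a short-lived connection inside SnapshotDescriptionUtils.isSecurityAvailable while validating the snapshot request, i.e. checking whether the ACL machinery is active before it tries to attach table permissions to the snapshot description. A client can ask a roughly similar question through Admin.getSecurityCapabilities; a brief sketch, with the analogy being mine rather than anything stated in the log:

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.security.SecurityCapability;

public class SecurityCheckSketch {
  // True if the cluster reports authorization support (the AccessController coprocessor
  // is installed), which is roughly the condition the snapshot validation cares about.
  static boolean authorizationEnabled(Admin admin) throws Exception {
    List<SecurityCapability> caps = admin.getSecurityCapabilities();
    return caps.contains(SecurityCapability.AUTHORIZATION);
  }
}
```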
2024-12-02T04:26:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c581af5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:56,615 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:56,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:56,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:56,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2044ab60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:56,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:56,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,616 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:56,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fb8e0be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:56,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:56,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:56,619 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T04:26:56,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:56,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:56,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:56,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:26:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:56,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,622 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T04:26:56,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:26:56,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T04:26:56,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-02T04:26:56,624 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:56,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T04:26:56,625 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:56,627 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:56,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742326_1502 (size=161) 2024-12-02T04:26:56,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742326_1502 (size=161) 2024-12-02T04:26:56,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742326_1502 (size=161) 2024-12-02T04:26:56,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:56,633 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd}] 2024-12-02T04:26:56,634 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,634 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T04:26:56,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-02T04:26:56,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 428273f3ac5f77d1702cb19f79dac3a4: 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for ecde63e4e4de7e6bec0f1bffc7ef31bd: 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. for emptySnaptb0-testExportWithChecksum completed. 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. for emptySnaptb0-testExportWithChecksum completed. 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:56,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:26:56,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742327_1503 (size=68) 2024-12-02T04:26:56,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742327_1503 (size=68) 2024-12-02T04:26:56,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742327_1503 (size=68) 2024-12-02T04:26:56,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:56,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-02T04:26:56,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-02T04:26:56,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,801 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd in 169 msec 2024-12-02T04:26:56,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742328_1504 (size=68) 2024-12-02T04:26:56,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742328_1504 (size=68) 2024-12-02T04:26:56,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742328_1504 (size=68) 2024-12-02T04:26:56,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
2024-12-02T04:26:56,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-02T04:26:56,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-02T04:26:56,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,806 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:56,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-12-02T04:26:56,808 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:56,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4 in 174 msec 2024-12-02T04:26:56,809 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:56,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
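The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool entries appear because the table's cf family is MOB-enabled, so the snapshot also records the shared mob region (the mobdir paths later in the log). A hedged sketch of how such a family is declared when the table is created; the threshold value is an arbitrary assumption, not something read from the log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateMobTable {
      static void create(Admin admin) throws IOException {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)       // large cells go to the mob area instead of the region's own HFiles
            .setMobThreshold(102400L)  // assumed example threshold (bytes)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
            .setColumnFamily(cf)
            .build();
        admin.createTable(td);
      }
    }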
2024-12-02T04:26:56,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:56,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:56,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:26:56,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742329_1505 (size=60) 2024-12-02T04:26:56,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742329_1505 (size=60) 2024-12-02T04:26:56,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742329_1505 (size=60) 2024-12-02T04:26:56,817 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:56,817 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-02T04:26:56,817 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-02T04:26:56,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742330_1506 (size=641) 2024-12-02T04:26:56,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742330_1506 (size=641) 2024-12-02T04:26:56,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742330_1506 (size=641) 2024-12-02T04:26:56,827 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:56,832 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:56,832 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-02T04:26:56,833 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:56,833 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-02T04:26:56,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 210 msec 2024-12-02T04:26:56,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T04:26:56,937 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T04:26:56,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:56,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:26:56,947 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,949 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-02T04:26:56,949 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
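The two "writing data to region ... with WAL disabled" warnings above correspond to client writes that explicitly skip the write-ahead log, which is why the server notes the data could be lost on a crash. A minimal sketch of such a write, assuming an open Connection (row key, qualifier and value are illustrative; table and family names come from the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class PutWithoutWal {
      static void write(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // assumed row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);     // produces the "WAL disabled" warning on the region server
          table.put(put);
        }
      }
    }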
2024-12-02T04:26:56,949 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:26:56,950 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,954 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,959 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T04:26:56,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T04:26:56,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113616961 (current time:1733113616961). 2024-12-02T04:26:56,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:26:56,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-02T04:26:56,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:26:56,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a6fcb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:56,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:56,963 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7aeef9be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
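The repeated "Scanning META starting at row=testtb-testExportWithChecksum..." entries are the test enumerating the table's regions; it found the two regions named throughout this log. Client code can get the same answer from Admin without scanning hbase:meta itself; a sketch assuming an open Admin:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    class ListRegions {
      static void print(Admin admin) throws IOException {
        List<RegionInfo> regions = admin.getRegions(TableName.valueOf("testtb-testExportWithChecksum"));
        // Expect the two regions seen in the log: 428273f3... (empty start key) and ecde63e4... (start key '1').
        for (RegionInfo region : regions) {
          System.out.println(region.getRegionNameAsString()
              + " startKey=" + Bytes.toStringBinary(region.getStartKey()));
        }
      }
    }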
2024-12-02T04:26:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,964 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56592, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:56,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42f13876, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:56,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:56,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:56,966 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:56,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:26:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,967 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bf87e70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:26:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:26:56,968 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:26:56,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:26:56,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:26:56,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@739269d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:26:56,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:26:56,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,969 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:26:56,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@486a0dba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:26:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:26:56,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:26:56,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:56,971 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55204, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:56,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:26:56,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:26:56,973 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46414, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:26:56,974 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
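Each of the short-lived connections in this log follows the same pattern: fetch the cluster id from the connection registry, locate hbase:meta, then locate the region actually needed — here the hbase:acl region that stores the table's permissions. The same lookup is exposed to clients through RegionLocator; a sketch assuming an open Connection:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    class LocateAclRow {
      static void locate(Connection conn) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("testtb-testExportWithChecksum"));
          // Corresponds to the "fetched location of 'hbase:acl'" entries above.
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }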
2024-12-02T04:26:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:26:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:26:56,975 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:26:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T04:26:56,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T04:26:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T04:26:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-02T04:26:56,978 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:26:56,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T04:26:56,979 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:26:56,981 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:26:56,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742331_1507 (size=156) 2024-12-02T04:26:56,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742331_1507 (size=156) 2024-12-02T04:26:56,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742331_1507 (size=156) 2024-12-02T04:26:56,988 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:26:56,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd}] 2024-12-02T04:26:56,989 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:56,989 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:57,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T04:26:57,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-02T04:26:57,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-02T04:26:57,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:26:57,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:26:57,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing ecde63e4e4de7e6bec0f1bffc7ef31bd 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T04:26:57,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 428273f3ac5f77d1702cb19f79dac3a4 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T04:26:57,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd is 71, key is 1485183648c5a1599b0fe3fa83bbbb01/cf:q/1733113616946/Put/seqid=0 2024-12-02T04:26:57,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 is 71, key is 09af6f3d2644ffb9be98ba465d03cc81/cf:q/1733113616945/Put/seqid=0 2024-12-02T04:26:57,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742332_1508 (size=8171) 2024-12-02T04:26:57,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:57,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742332_1508 (size=8171) 2024-12-02T04:26:57,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742332_1508 (size=8171) 2024-12-02T04:26:57,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742333_1509 (size=5101) 2024-12-02T04:26:57,179 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742333_1509 (size=5101) 2024-12-02T04:26:57,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742333_1509 (size=5101) 2024-12-02T04:26:57,179 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:26:57,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:57,182 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:57,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/.tmp/cf/994fab0933c04e3f8b0c18c8de4c0e58, store: [table=testtb-testExportWithChecksum family=cf region=ecde63e4e4de7e6bec0f1bffc7ef31bd] 2024-12-02T04:26:57,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/.tmp/cf/994fab0933c04e3f8b0c18c8de4c0e58 is 206, key is 1adcf4ca16cf88383b1f93c14efb19273/cf:q/1733113616946/Put/seqid=0 2024-12-02T04:26:57,183 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:57,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/.tmp/cf/4224f7e12588427498bb6912d6a5b1e6, store: [table=testtb-testExportWithChecksum family=cf region=428273f3ac5f77d1702cb19f79dac3a4] 2024-12-02T04:26:57,184 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/.tmp/cf/4224f7e12588427498bb6912d6a5b1e6 is 206, key is 0c148d857cb5dc1ab6dba60d2ce677f7a/cf:q/1733113616945/Put/seqid=0 2024-12-02T04:26:57,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742334_1510 (size=14853) 2024-12-02T04:26:57,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742334_1510 (size=14853) 2024-12-02T04:26:57,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742334_1510 (size=14853) 2024-12-02T04:26:57,188 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/.tmp/cf/994fab0933c04e3f8b0c18c8de4c0e58 2024-12-02T04:26:57,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742335_1511 (size=5904) 2024-12-02T04:26:57,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742335_1511 (size=5904) 2024-12-02T04:26:57,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/.tmp/cf/994fab0933c04e3f8b0c18c8de4c0e58 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 2024-12-02T04:26:57,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742335_1511 (size=5904) 2024-12-02T04:26:57,193 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/.tmp/cf/4224f7e12588427498bb6912d6a5b1e6 2024-12-02T04:26:57,196 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58, entries=47, sequenceid=6, filesize=14.5 K 2024-12-02T04:26:57,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/.tmp/cf/4224f7e12588427498bb6912d6a5b1e6 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 2024-12-02T04:26:57,197 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for ecde63e4e4de7e6bec0f1bffc7ef31bd in 53ms, sequenceid=6, compaction requested=false 2024-12-02T04:26:57,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-02T04:26:57,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for ecde63e4e4de7e6bec0f1bffc7ef31bd: 2024-12-02T04:26:57,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. for snaptb0-testExportWithChecksum completed. 2024-12-02T04:26:57,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-02T04:26:57,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:57,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58] hfiles 2024-12-02T04:26:57,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 for snapshot=snaptb0-testExportWithChecksum 2024-12-02T04:26:57,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6, entries=3, sequenceid=6, filesize=5.8 K 2024-12-02T04:26:57,202 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 428273f3ac5f77d1702cb19f79dac3a4 in 59ms, sequenceid=6, compaction requested=false 
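Because the requested snapshot is of type FLUSH, each region first flushes its memstore to HFiles (the HRegion "Flushing ..." and DefaultMobStoreFlusher entries above) so the snapshot manifest can reference only files already on disk. The same flush can be requested explicitly from a client; a sketch assuming an open Admin:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class FlushTable {
      static void flush(Admin admin) throws IOException {
        // Forces the memstore of every region of the table out to HFiles,
        // the per-region effect the FLUSH-type snapshot achieves above.
        admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
      }
    }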
2024-12-02T04:26:57,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 428273f3ac5f77d1702cb19f79dac3a4: 2024-12-02T04:26:57,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. for snaptb0-testExportWithChecksum completed. 2024-12-02T04:26:57,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-02T04:26:57,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:26:57,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6] hfiles 2024-12-02T04:26:57,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 for snapshot=snaptb0-testExportWithChecksum 2024-12-02T04:26:57,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742336_1512 (size=107) 2024-12-02T04:26:57,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742336_1512 (size=107) 2024-12-02T04:26:57,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742336_1512 (size=107) 2024-12-02T04:26:57,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 
2024-12-02T04:26:57,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-02T04:26:57,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-02T04:26:57,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:57,211 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:57,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd in 224 msec 2024-12-02T04:26:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742337_1513 (size=107) 2024-12-02T04:26:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742337_1513 (size=107) 2024-12-02T04:26:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742337_1513 (size=107) 2024-12-02T04:26:57,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
2024-12-02T04:26:57,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-02T04:26:57,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-02T04:26:57,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:57,223 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:57,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-02T04:26:57,226 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:26:57,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4 in 236 msec 2024-12-02T04:26:57,226 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:26:57,227 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
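The flush and SnapshotRegionProcedure entries above are the server-side half of a FLUSH-type table snapshot. For orientation only, a minimal client-side sketch of the Admin call that drives this flow (connection details are assumed from the test configuration, not taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
        public static void main(String[] args) throws Exception {
            // Cluster coordinates are read from hbase-site.xml on the classpath (assumption).
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // For an enabled table this defaults to a flush snapshot,
                // matching "type=FLUSH" in the procedure entries above.
                admin.snapshot("snaptb0-testExportWithChecksum",
                        TableName.valueOf("testtb-testExportWithChecksum"));
            }
        }
    }
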
2024-12-02T04:26:57,227 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:26:57,227 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:26:57,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4] hfiles 2024-12-02T04:26:57,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:26:57,228 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:26:57,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742338_1514 (size=291) 2024-12-02T04:26:57,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742338_1514 (size=291) 2024-12-02T04:26:57,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742338_1514 (size=291) 2024-12-02T04:26:57,239 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:26:57,239 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-02T04:26:57,239 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T04:26:57,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742339_1515 (size=951) 2024-12-02T04:26:57,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742339_1515 (size=951) 2024-12-02T04:26:57,254 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742339_1515 (size=951) 2024-12-02T04:26:57,260 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:26:57,265 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:26:57,265 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T04:26:57,266 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:26:57,266 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-02T04:26:57,267 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 290 msec 2024-12-02T04:26:57,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T04:26:57,298 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T04:26:57,298 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298 2024-12-02T04:26:57,298 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:26:57,321 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 
2024-12-02T04:26:57,321 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@530d277f, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T04:26:57,323 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:26:57,326 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T04:26:57,344 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:57,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:57,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-12465057851740793438.jar 2024-12-02T04:26:58,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-10489311931404159228.jar 2024-12-02T04:26:58,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,227 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:26:58,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:26:58,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:26:58,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:26:58,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:26:58,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:26:58,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:58,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:58,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:58,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:58,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:26:58,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:58,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:26:58,272 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742340_1516 (size=131440) 2024-12-02T04:26:58,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742340_1516 (size=131440) 2024-12-02T04:26:58,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742340_1516 (size=131440) 2024-12-02T04:26:58,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742341_1517 (size=4188619) 2024-12-02T04:26:58,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742341_1517 (size=4188619) 2024-12-02T04:26:58,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742341_1517 (size=4188619) 2024-12-02T04:26:58,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742342_1518 (size=1323991) 2024-12-02T04:26:58,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742342_1518 (size=1323991) 2024-12-02T04:26:58,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742342_1518 (size=1323991) 2024-12-02T04:26:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742343_1519 (size=903927) 2024-12-02T04:26:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742343_1519 (size=903927) 2024-12-02T04:26:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742343_1519 (size=903927) 2024-12-02T04:26:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742344_1520 (size=8360083) 2024-12-02T04:26:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742344_1520 (size=8360083) 2024-12-02T04:26:58,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742344_1520 (size=8360083) 2024-12-02T04:26:58,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742345_1521 (size=1877034) 2024-12-02T04:26:58,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742345_1521 (size=1877034) 2024-12-02T04:26:58,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742345_1521 (size=1877034) 2024-12-02T04:26:58,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742346_1522 (size=77835) 2024-12-02T04:26:58,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742346_1522 (size=77835) 2024-12-02T04:26:58,337 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742346_1522 (size=77835) 2024-12-02T04:26:58,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742347_1523 (size=30949) 2024-12-02T04:26:58,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742347_1523 (size=30949) 2024-12-02T04:26:58,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742347_1523 (size=30949) 2024-12-02T04:26:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742348_1524 (size=1597213) 2024-12-02T04:26:58,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742348_1524 (size=1597213) 2024-12-02T04:26:58,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742348_1524 (size=1597213) 2024-12-02T04:26:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742349_1525 (size=440957) 2024-12-02T04:26:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742349_1525 (size=440957) 2024-12-02T04:26:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742349_1525 (size=440957) 2024-12-02T04:26:58,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742350_1526 (size=4695811) 2024-12-02T04:26:58,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742350_1526 (size=4695811) 2024-12-02T04:26:58,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742350_1526 (size=4695811) 2024-12-02T04:26:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742351_1527 (size=232957) 2024-12-02T04:26:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742351_1527 (size=232957) 2024-12-02T04:26:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742351_1527 (size=232957) 2024-12-02T04:26:58,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742352_1528 (size=127628) 2024-12-02T04:26:58,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742352_1528 (size=127628) 2024-12-02T04:26:58,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742352_1528 (size=127628) 2024-12-02T04:26:58,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742353_1529 (size=20406) 2024-12-02T04:26:58,393 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742353_1529 (size=20406) 2024-12-02T04:26:58,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742353_1529 (size=20406) 2024-12-02T04:26:58,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742354_1530 (size=6424732) 2024-12-02T04:26:58,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742354_1530 (size=6424732) 2024-12-02T04:26:58,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742354_1530 (size=6424732) 2024-12-02T04:26:58,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742355_1531 (size=5175431) 2024-12-02T04:26:58,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742355_1531 (size=5175431) 2024-12-02T04:26:58,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742355_1531 (size=5175431) 2024-12-02T04:26:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742356_1532 (size=217634) 2024-12-02T04:26:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742356_1532 (size=217634) 2024-12-02T04:26:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742356_1532 (size=217634) 2024-12-02T04:26:58,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742357_1533 (size=1832290) 2024-12-02T04:26:58,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742357_1533 (size=1832290) 2024-12-02T04:26:58,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742357_1533 (size=1832290) 2024-12-02T04:26:58,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742358_1534 (size=322274) 2024-12-02T04:26:58,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742358_1534 (size=322274) 2024-12-02T04:26:58,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742358_1534 (size=322274) 2024-12-02T04:26:58,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742359_1535 (size=503880) 2024-12-02T04:26:58,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742359_1535 (size=503880) 2024-12-02T04:26:58,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742359_1535 (size=503880) 
2024-12-02T04:26:58,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742360_1536 (size=29229) 2024-12-02T04:26:58,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742360_1536 (size=29229) 2024-12-02T04:26:58,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742360_1536 (size=29229) 2024-12-02T04:26:58,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742361_1537 (size=24096) 2024-12-02T04:26:58,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742361_1537 (size=24096) 2024-12-02T04:26:58,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742361_1537 (size=24096) 2024-12-02T04:26:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742362_1538 (size=111872) 2024-12-02T04:26:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742362_1538 (size=111872) 2024-12-02T04:26:58,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742362_1538 (size=111872) 2024-12-02T04:26:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742363_1539 (size=45609) 2024-12-02T04:26:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742363_1539 (size=45609) 2024-12-02T04:26:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742363_1539 (size=45609) 2024-12-02T04:26:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742364_1540 (size=136454) 2024-12-02T04:26:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742364_1540 (size=136454) 2024-12-02T04:26:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742364_1540 (size=136454) 2024-12-02T04:26:58,492 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
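The JobResourceUploader warning just above ("No job jar file set") points at the standard MapReduce Job API it names; it appears to be informational here, since the dependency jars are being shipped separately (the "using jar" lines above). As a minimal illustrative sketch of the two ways a job jar is normally attached (class and path names are assumptions, not from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf, "export-snapshot-example"); // job name is illustrative
            // Option 1: point at an explicit jar containing the user classes (hypothetical path).
            // job.setJar("/path/to/user-classes.jar");
            // Option 2: let Hadoop locate the jar that contains a given class,
            // which is what silences the "No job jar file set" warning.
            job.setJarByClass(JobJarSketch.class);
        }
    }
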
2024-12-02T04:26:58,494 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-02T04:26:58,495 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T04:26:58,495 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T04:26:58,495 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-02T04:26:58,495 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T04:26:58,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742365_1541 (size=1023) 2024-12-02T04:26:58,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742365_1541 (size=1023) 2024-12-02T04:26:58,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742365_1541 (size=1023) 2024-12-02T04:26:58,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742366_1542 (size=35) 2024-12-02T04:26:58,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742366_1542 (size=35) 2024-12-02T04:26:58,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742366_1542 (size=35) 2024-12-02T04:26:58,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742367_1543 (size=304044) 2024-12-02T04:26:58,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742367_1543 (size=304044) 2024-12-02T04:26:58,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742367_1543 (size=304044) 2024-12-02T04:26:59,930 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:26:59,931 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:26:59,934 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0008_000001 (auth:SIMPLE) from 127.0.0.1:51900 2024-12-02T04:26:59,942 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0008/container_1733113348655_0008_01_000001/launch_container.sh] 2024-12-02T04:26:59,942 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0008/container_1733113348655_0008_01_000001/container_tokens] 2024-12-02T04:26:59,942 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0008/container_1733113348655_0008_01_000001/sysfs] 2024-12-02T04:27:00,867 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:33108 2024-12-02T04:27:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-02T04:27:01,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-02T04:27:01,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-02T04:27:06,005 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:46144 2024-12-02T04:27:06,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742368_1544 (size=349742) 2024-12-02T04:27:06,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742368_1544 (size=349742) 2024-12-02T04:27:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742368_1544 (size=349742) 2024-12-02T04:27:07,179 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:27:08,197 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:52592 2024-12-02T04:27:08,197 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40018 2024-12-02T04:27:09,084 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40026 2024-12-02T04:27:09,091 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:52594 2024-12-02T04:27:11,934 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0009_01_000006 while processing FINISH_CONTAINERS event Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:14,545 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000003/launch_container.sh] 2024-12-02T04:27:14,545 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000003/container_tokens] 2024-12-02T04:27:14,547 WARN [ContainersLauncher #2 
{}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:15,129 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:52202 2024-12-02T04:27:16,032 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000004/launch_container.sh] 2024-12-02T04:27:16,032 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000004/container_tokens] 2024-12-02T04:27:16,032 WARN 
[ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000004/sysfs] 2024-12-02T04:27:16,115 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:52204 2024-12-02T04:27:16,118 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:57906 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:17,113 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:52212 2024-12-02T04:27:17,966 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d95ecee29e5f13e9036a6e1711d1a0f3, had cached 0 bytes from a total of 6284 2024-12-02T04:27:17,966 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 37a29921e433fcac8df396b670af8f39, had cached 0 bytes from a total of 14265 2024-12-02T04:27:18,782 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000002/launch_container.sh] 2024-12-02T04:27:18,782 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000002/container_tokens] 2024-12-02T04:27:18,783 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000002/sysfs] 2024-12-02T04:27:19,008 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0009_01_000011 while processing FINISH_CONTAINERS event 2024-12-02T04:27:19,903 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0009_01_000012 
while processing FINISH_CONTAINERS event 2024-12-02T04:27:20,564 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:27:21,052 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000005/launch_container.sh] 2024-12-02T04:27:21,067 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000005/container_tokens] 2024-12-02T04:27:21,067 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000005/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:22,136 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:35374 2024-12-02T04:27:22,193 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T04:27:22,235 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=920.55 KB, freeSize=879.10 MB, max=880 MB, blockCount=5, accesses=7, hits=2, hitRatio=28.57%, , cachingAccesses=7, cachingHits=2, cachingHitsRatio=28.57%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T04:27:22,277 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T04:27:22,407 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T04:27:22,408 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-02T04:27:22,438 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000009/launch_container.sh] 2024-12-02T04:27:22,438 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000009/container_tokens] 2024-12-02T04:27:22,438 WARN [ContainersLauncher #3 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000009/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:23,904 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T04:27:23,915 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T04:27:23,935 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T04:27:24,151 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:35380 2024-12-02T04:27:24,151 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:36432 2024-12-02T04:27:24,325 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000010/launch_container.sh] 2024-12-02T04:27:24,325 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000010/container_tokens] 2024-12-02T04:27:24,325 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:24,835 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 208769 ms 2024-12-02T04:27:24,966 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T04:27:24,966 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T04:27:24,967 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ecde63e4e4de7e6bec0f1bffc7ef31bd changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:27:24,967 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d95ecee29e5f13e9036a6e1711d1a0f3 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:27:24,967 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 37a29921e433fcac8df396b670af8f39 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:27:24,967 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 428273f3ac5f77d1702cb19f79dac3a4 changed from -1.0 to 0.0, refreshing cache 2024-12-02T04:27:24,977 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T04:27:24,980 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 
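The repeated "Error: java.io.IOException: Checksum mismatch" entries above are raised by ExportSnapshot$ExportMapper.verifyCopyResult (ExportSnapshot.java:601), which compares the source and destination file checksums after each copy. The sketch below is not the HBase implementation and the paths are hypothetical; it only illustrates the kind of comparison involved and why it can fail when the source is HDFS and the destination is a local file: filesystem.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.Path;

    public class ChecksumCompareSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical source (HDFS) and destination (local fs) copies of the same HFile.
        Path src = new Path("hdfs://localhost:34539/user/jenkins/archive/some-hfile");
        Path dst = new Path("file:///tmp/local-export/archive/some-hfile");

        FileChecksum srcSum = src.getFileSystem(conf).getFileChecksum(src);
        FileChecksum dstSum = dst.getFileSystem(conf).getFileChecksum(dst);

        // HDFS reports a CRC-based checksum type; the local filesystem reports a different
        // type (or none at all), so this comparison can fail even when the copied bytes are
        // identical -- which is what the "different types" wording in the errors describes.
        if (srcSum == null || dstSum == null || !srcSum.equals(dstSum)) {
          throw new java.io.IOException("Checksum mismatch between " + src + " and " + dst);
        }
      }
    }

Because the two FileSystem types use different checksum algorithms by default, the per-file comparison is only meaningful when both sides compute a comparable checksum, which is what the -Ddfs.checksum.combine.mode=COMPOSITE_CRC hint in the message addresses.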
2024-12-02T04:27:24,980 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-02T04:27:24,980 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 3 regions 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:27:24,981 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:27:24,981 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:27:24,981 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:27:24,981 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-02T04:27:24,984 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 2024-12-02T04:27:24,985 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25161495984578286, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8209636651948572, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8575790006018934, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-12-02T04:27:25,324 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 342 ms to try 14400 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25161495984578286 to a new imbalance of 0.015748566714415848. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8209636651948572, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8575790006018934, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-02T04:27:25,336 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 6 2024-12-02T04:27:25,336 INFO [master/e2eaa0f11f7e:0.Chore.1 {}] master.HMaster(2172): balance hri=428273f3ac5f77d1702cb19f79dac3a4, source=e2eaa0f11f7e,41643,1733113342088, destination=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:27:25,343 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, REOPEN/MOVE 2024-12-02T04:27:25,345 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, REOPEN/MOVE 2024-12-02T04:27:25,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:27:25,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, REOPEN/MOVE because future has completed 2024-12-02T04:27:25,352 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:27:25,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:27:25,506 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(122): Close 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,506 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:27:25,506 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1722): Closing 428273f3ac5f77d1702cb19f79dac3a4, disabling compactions & flushes 2024-12-02T04:27:25,506 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=225}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:25,506 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:25,507 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. after waiting 0 ms 2024-12-02T04:27:25,507 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:25,536 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:27:25,536 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:27:25,537 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:25,537 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1676): Region close journal for 428273f3ac5f77d1702cb19f79dac3a4: Waiting for close lock at 1733113645506Running coprocessor pre-close hooks at 1733113645506Disabling compacts and flushes for region at 1733113645506Disabling writes for close at 1733113645507 (+1 ms)Writing region close event to WAL at 1733113645513 (+6 ms)Running coprocessor post-close hooks at 1733113645536 (+23 ms)Closed at 1733113645536 2024-12-02T04:27:25,537 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionServer(3302): Adding 428273f3ac5f77d1702cb19f79dac3a4 move to e2eaa0f11f7e,37413,1733113342217 record at close sequenceid=6 2024-12-02T04:27:25,540 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(157): Closed 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,541 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=CLOSED 2024-12-02T04:27:25,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:27:25,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-02T04:27:25,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, 
server=e2eaa0f11f7e,41643,1733113342088 in 193 msec 2024-12-02T04:27:25,552 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, REOPEN/MOVE; state=CLOSED, location=e2eaa0f11f7e,37413,1733113342217; forceNewPlan=false, retain=false 2024-12-02T04:27:25,704 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T04:27:25,705 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=OPENING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:27:25,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, REOPEN/MOVE because future has completed 2024-12-02T04:27:25,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:27:25,865 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:25,866 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 428273f3ac5f77d1702cb19f79dac3a4, NAME => 'testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:27:25,866 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. service=AccessControlService 2024-12-02T04:27:25,866 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
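As a side note on the StochasticLoadBalancer entries above: the logged imbalance figures are consistent with a weighted average of the per-function imbalances using the multipliers shown (functions marked "not needed" contribute nothing). A small stand-alone check, not HBase code:

    public class BalancerCostCheck {
      public static void main(String[] args) {
        // Multipliers of the ten cost functions listed in the log.
        double sum = 500 + 7 + 25 + 15 + 35 + 5 + 5 + 5 + 5 + 5; // 607

        // Initial plan: RegionCountSkew, ReadRequest and WriteRequest are the non-zero terms.
        double initial = (500 * 0.2886751345948129
                        + 5 * 0.8209636651948572
                        + 5 * 0.8575790006018934) / sum;

        // After the plan: RegionCountSkew drops to zero and MoveCost picks up the single move.
        double moved = (7 * 0.16666666666666666
                      + 5 * 0.8209636651948572
                      + 5 * 0.8575790006018934) / sum;

        System.out.println(initial); // ~0.2516149..., matching the logged 0.25161495984578286
        System.out.println(moved);   // ~0.0157485..., matching the logged 0.015748566714415848
      }
    }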
2024-12-02T04:27:25,867 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,867 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:27:25,867 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7794): checking encryption for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,867 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7797): checking classloading for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,876 INFO [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,879 INFO [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 428273f3ac5f77d1702cb19f79dac3a4 columnFamilyName cf 2024-12-02T04:27:25,880 DEBUG [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:27:25,919 DEBUG [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 2024-12-02T04:27:25,919 INFO [StoreOpener-428273f3ac5f77d1702cb19f79dac3a4-1 {}] regionserver.HStore(327): Store=428273f3ac5f77d1702cb19f79dac3a4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:27:25,919 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1038): replaying wal for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,921 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,922 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,923 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1048): stopping wal replay for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,923 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,925 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1093): writing seq id for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,926 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1114): Opened 428273f3ac5f77d1702cb19f79dac3a4; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61038274, jitterRate=-0.09045884013175964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:27:25,926 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:25,926 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1006): Region open journal for 428273f3ac5f77d1702cb19f79dac3a4: Running coprocessor pre-open hook at 1733113645867Writing region info on filesystem at 1733113645867Initializing all the Stores at 1733113645874 (+7 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113645874Cleaning up temporary data from old regions at 1733113645923 (+49 ms)Running coprocessor post-open hooks at 1733113645926 (+3 ms)Region opened successfully at 1733113645926 2024-12-02T04:27:25,927 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4., pid=226, masterSystemTime=1733113645862 2024-12-02T04:27:25,930 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:25,930 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 
2024-12-02T04:27:25,930 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=OPEN, openSeqNum=10, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:27:25,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:27:25,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-02T04:27:25,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,37413,1733113342217 in 227 msec 2024-12-02T04:27:25,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, REOPEN/MOVE in 602 msec 2024-12-02T04:27:25,946 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-02T04:27:26,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:36436 2024-12-02T04:27:26,206 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000007/launch_container.sh] 2024-12-02T04:27:26,206 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000007/container_tokens] 2024-12-02T04:27:26,206 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000007/sysfs] 2024-12-02T04:27:27,776 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000008/launch_container.sh] 2024-12-02T04:27:27,776 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000008/container_tokens] 2024-12-02T04:27:27,776 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000008/sysfs] 2024-12-02T04:27:28,241 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000013/launch_container.sh] 2024-12-02T04:27:28,241 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000013/container_tokens] 2024-12-02T04:27:28,241 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000013/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:29,178 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:35384 2024-12-02T04:27:30,058 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:27:31,422 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:31,869 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. because 1cd3dc0c66d2745722bcc18163a3b02e/l has an old edit so flush to free WALs after random delay 170368 ms 2024-12-02T04:27:32,238 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000014/launch_container.sh] 2024-12-02T04:27:32,238 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000014/container_tokens] 2024-12-02T04:27:32,238 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000014/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:32,911 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40234 2024-12-02T04:27:33,935 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40244 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/local-export-1733113617298/archive/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T04:27:34,924 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40256 2024-12-02T04:27:35,200 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0009_01_000020 while processing FINISH_CONTAINERS event 2024-12-02T04:27:36,703 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40270 2024-12-02T04:27:36,706 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40272 2024-12-02T04:27:36,706 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:40274 2024-12-02T04:27:36,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742369_1545 (size=48989) 2024-12-02T04:27:36,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742369_1545 (size=48989) 2024-12-02T04:27:36,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742369_1545 (size=48989) 2024-12-02T04:27:36,847 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733113348655_0009_01_000019 is : 143 2024-12-02T04:27:36,862 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733113348655_0009_01_000018 is : 143 2024-12-02T04:27:36,872 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000019/launch_container.sh] 2024-12-02T04:27:36,872 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000019/container_tokens] 2024-12-02T04:27:36,872 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000019/sysfs] 2024-12-02T04:27:36,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742370_1546 (size=460) 2024-12-02T04:27:36,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742370_1546 (size=460) 2024-12-02T04:27:36,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742370_1546 (size=460) 2024-12-02T04:27:36,874 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733113348655_0009_01_000021 is : 143 2024-12-02T04:27:36,899 WARN [ContainersLauncher #8 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000018/launch_container.sh] 2024-12-02T04:27:36,899 WARN [ContainersLauncher #8 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000018/container_tokens] 2024-12-02T04:27:36,899 WARN [ContainersLauncher #8 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000018/sysfs] 2024-12-02T04:27:36,916 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000021/launch_container.sh] 2024-12-02T04:27:36,916 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000021/container_tokens] 2024-12-02T04:27:36,916 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000017/launch_container.sh] 2024-12-02T04:27:36,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000017/container_tokens] 2024-12-02T04:27:36,916 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000021/sysfs] 2024-12-02T04:27:36,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000017/sysfs] 2024-12-02T04:27:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742371_1547 (size=48989) 2024-12-02T04:27:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742371_1547 (size=48989) 2024-12-02T04:27:36,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742371_1547 (size=48989) 2024-12-02T04:27:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742372_1548 (size=349742) 2024-12-02T04:27:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742372_1548 (size=349742) 2024-12-02T04:27:36,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742372_1548 (size=349742) 2024-12-02T04:27:37,047 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000015/launch_container.sh] 2024-12-02T04:27:37,047 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000015/container_tokens] 2024-12-02T04:27:37,047 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000015/sysfs] 2024-12-02T04:27:37,865 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0009_01_000022 while processing FINISH_CONTAINERS event 2024-12-02T04:27:38,862 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733113348655_0009_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T04:27:38,864 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863 2024-12-02T04:27:38,864 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:27:38,911 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:27:38,911 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T04:27:38,931 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:27:38,954 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T04:27:39,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742373_1549 (size=951) 2024-12-02T04:27:39,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742373_1549 (size=951) 2024-12-02T04:27:39,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742373_1549 (size=951) 2024-12-02T04:27:39,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742374_1550 (size=156) 2024-12-02T04:27:39,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742374_1550 (size=156) 2024-12-02T04:27:39,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742374_1550 (size=156) 2024-12-02T04:27:39,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:39,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:39,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:39,556 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000016/launch_container.sh] 2024-12-02T04:27:39,556 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000016/container_tokens] 2024-12-02T04:27:39,556 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000016/sysfs] 2024-12-02T04:27:40,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-11543527499336532460.jar 2024-12-02T04:27:40,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-15481283671095653409.jar 2024-12-02T04:27:40,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:27:40,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:27:40,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:27:40,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:27:40,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:27:40,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:27:40,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:27:40,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:27:40,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:27:40,146 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:27:40,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:27:40,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:27:40,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:27:40,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:27:40,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:27:40,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:27:40,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:27:40,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:27:40,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:27:40,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742375_1551 (size=131440) 2024-12-02T04:27:40,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742375_1551 (size=131440) 2024-12-02T04:27:40,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742375_1551 
(size=131440) 2024-12-02T04:27:40,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742376_1552 (size=4188619) 2024-12-02T04:27:40,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742376_1552 (size=4188619) 2024-12-02T04:27:40,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742376_1552 (size=4188619) 2024-12-02T04:27:40,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742377_1553 (size=1323991) 2024-12-02T04:27:40,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742377_1553 (size=1323991) 2024-12-02T04:27:40,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742377_1553 (size=1323991) 2024-12-02T04:27:40,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742378_1554 (size=903927) 2024-12-02T04:27:40,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742378_1554 (size=903927) 2024-12-02T04:27:40,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742378_1554 (size=903927) 2024-12-02T04:27:40,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742379_1555 (size=8360083) 2024-12-02T04:27:40,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742379_1555 (size=8360083) 2024-12-02T04:27:40,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742379_1555 (size=8360083) 2024-12-02T04:27:40,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742380_1556 (size=1877034) 2024-12-02T04:27:40,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742380_1556 (size=1877034) 2024-12-02T04:27:40,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742380_1556 (size=1877034) 2024-12-02T04:27:40,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742381_1557 (size=77835) 2024-12-02T04:27:40,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742381_1557 (size=77835) 2024-12-02T04:27:40,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742381_1557 (size=77835) 2024-12-02T04:27:40,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742382_1558 (size=30949) 2024-12-02T04:27:40,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to 
blk_1073742382_1558 (size=30949) 2024-12-02T04:27:40,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742382_1558 (size=30949) 2024-12-02T04:27:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742383_1559 (size=1597213) 2024-12-02T04:27:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742383_1559 (size=1597213) 2024-12-02T04:27:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742383_1559 (size=1597213) 2024-12-02T04:27:40,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742384_1560 (size=6424732) 2024-12-02T04:27:40,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742384_1560 (size=6424732) 2024-12-02T04:27:40,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742384_1560 (size=6424732) 2024-12-02T04:27:40,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742385_1561 (size=4695811) 2024-12-02T04:27:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742385_1561 (size=4695811) 2024-12-02T04:27:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742385_1561 (size=4695811) 2024-12-02T04:27:40,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742386_1562 (size=232957) 2024-12-02T04:27:40,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742386_1562 (size=232957) 2024-12-02T04:27:40,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742386_1562 (size=232957) 2024-12-02T04:27:40,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742387_1563 (size=127628) 2024-12-02T04:27:40,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742387_1563 (size=127628) 2024-12-02T04:27:40,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742387_1563 (size=127628) 2024-12-02T04:27:40,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742388_1564 (size=440957) 2024-12-02T04:27:40,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742388_1564 (size=440957) 2024-12-02T04:27:40,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742388_1564 (size=440957) 2024-12-02T04:27:40,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 
is added to blk_1073742389_1565 (size=20406) 2024-12-02T04:27:40,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742389_1565 (size=20406) 2024-12-02T04:27:40,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742389_1565 (size=20406) 2024-12-02T04:27:40,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742390_1566 (size=5175431) 2024-12-02T04:27:40,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742390_1566 (size=5175431) 2024-12-02T04:27:40,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742390_1566 (size=5175431) 2024-12-02T04:27:40,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742391_1567 (size=217634) 2024-12-02T04:27:40,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742391_1567 (size=217634) 2024-12-02T04:27:40,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742391_1567 (size=217634) 2024-12-02T04:27:40,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742392_1568 (size=1832290) 2024-12-02T04:27:40,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742392_1568 (size=1832290) 2024-12-02T04:27:40,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742392_1568 (size=1832290) 2024-12-02T04:27:40,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742393_1569 (size=322274) 2024-12-02T04:27:40,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742393_1569 (size=322274) 2024-12-02T04:27:40,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742393_1569 (size=322274) 2024-12-02T04:27:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742394_1570 (size=503880) 2024-12-02T04:27:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742394_1570 (size=503880) 2024-12-02T04:27:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742394_1570 (size=503880) 2024-12-02T04:27:40,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742395_1571 (size=29229) 2024-12-02T04:27:40,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742395_1571 (size=29229) 2024-12-02T04:27:40,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40371 is added to blk_1073742395_1571 (size=29229) 2024-12-02T04:27:40,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742396_1572 (size=24096) 2024-12-02T04:27:40,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742396_1572 (size=24096) 2024-12-02T04:27:40,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742396_1572 (size=24096) 2024-12-02T04:27:40,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742397_1573 (size=111872) 2024-12-02T04:27:40,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742397_1573 (size=111872) 2024-12-02T04:27:40,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742397_1573 (size=111872) 2024-12-02T04:27:40,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742398_1574 (size=45609) 2024-12-02T04:27:40,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742398_1574 (size=45609) 2024-12-02T04:27:40,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742398_1574 (size=45609) 2024-12-02T04:27:40,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742399_1575 (size=136454) 2024-12-02T04:27:40,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742399_1575 (size=136454) 2024-12-02T04:27:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742399_1575 (size=136454) 2024-12-02T04:27:40,406 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-02T04:27:40,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-02T04:27:40,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T04:27:40,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T04:27:40,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-02T04:27:40,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T04:27:40,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742400_1576 (size=1023) 2024-12-02T04:27:40,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742400_1576 (size=1023) 2024-12-02T04:27:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742400_1576 (size=1023) 2024-12-02T04:27:40,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742401_1577 (size=35) 2024-12-02T04:27:40,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742401_1577 (size=35) 2024-12-02T04:27:40,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742401_1577 (size=35) 2024-12-02T04:27:40,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742402_1578 (size=303996) 2024-12-02T04:27:40,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742402_1578 (size=303996) 2024-12-02T04:27:40,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742402_1578 (size=303996) 2024-12-02T04:27:41,293 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ecde63e4e4de7e6bec0f1bffc7ef31bd, had cached 0 bytes from a total of 14853 2024-12-02T04:27:43,005 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:27:43,005 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T04:27:43,007 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0009_000001 (auth:SIMPLE) from 127.0.0.1:48956 2024-12-02T04:27:43,017 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000001/launch_container.sh] 2024-12-02T04:27:43,017 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000001/container_tokens] 2024-12-02T04:27:43,017 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0009/container_1733113348655_0009_01_000001/sysfs] 2024-12-02T04:27:43,886 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:34206 2024-12-02T04:27:48,212 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:39104 2024-12-02T04:27:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742403_1579 (size=349694) 2024-12-02T04:27:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742403_1579 (size=349694) 2024-12-02T04:27:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742403_1579 (size=349694) 2024-12-02T04:27:50,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:34216 2024-12-02T04:27:50,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:48968 2024-12-02T04:27:50,564 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T04:27:51,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:48984 2024-12-02T04:27:51,320 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:34224 2024-12-02T04:27:54,008 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:27:55,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742404_1580 (size=14853) 2024-12-02T04:27:55,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742404_1580 (size=14853) 2024-12-02T04:27:55,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742404_1580 (size=14853) 2024-12-02T04:27:55,519 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000002/launch_container.sh] 2024-12-02T04:27:55,519 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000002/container_tokens] 2024-12-02T04:27:55,519 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000002/sysfs] 2024-12-02T04:27:57,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742406_1582 (size=5904) 2024-12-02T04:27:57,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742406_1582 (size=5904) 2024-12-02T04:27:57,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742406_1582 (size=5904) 2024-12-02T04:27:57,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742407_1583 (size=5101) 2024-12-02T04:27:57,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742407_1583 (size=5101) 2024-12-02T04:27:57,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742407_1583 (size=5101) 2024-12-02T04:27:57,526 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000004/launch_container.sh] 2024-12-02T04:27:57,526 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000004/container_tokens] 2024-12-02T04:27:57,526 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_2/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000004/sysfs] 2024-12-02T04:27:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742408_1584 (size=8171) 2024-12-02T04:27:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742408_1584 (size=8171) 2024-12-02T04:27:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742408_1584 (size=8171) 2024-12-02T04:27:57,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742405_1581 (size=31733) 2024-12-02T04:27:57,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742405_1581 (size=31733) 2024-12-02T04:27:57,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742405_1581 (size=31733) 2024-12-02T04:27:57,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000005/launch_container.sh] 2024-12-02T04:27:57,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000005/container_tokens] 2024-12-02T04:27:57,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000005/sysfs] 2024-12-02T04:27:57,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742409_1585 (size=463) 2024-12-02T04:27:57,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742409_1585 (size=463) 2024-12-02T04:27:57,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742409_1585 (size=463) 2024-12-02T04:27:57,747 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000003/launch_container.sh] 2024-12-02T04:27:57,747 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000003/container_tokens] 2024-12-02T04:27:57,747 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_3/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000003/sysfs] 2024-12-02T04:27:57,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742410_1586 (size=31733) 2024-12-02T04:27:57,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742410_1586 (size=31733) 2024-12-02T04:27:57,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742410_1586 (size=31733) 2024-12-02T04:27:57,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742411_1587 (size=349694) 2024-12-02T04:27:57,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742411_1587 (size=349694) 2024-12-02T04:27:57,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742411_1587 (size=349694) 2024-12-02T04:27:58,236 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:47610 2024-12-02T04:27:58,251 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:46196 2024-12-02T04:27:58,259 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:46200 2024-12-02T04:27:59,692 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:27:59,693 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status 
and integrity. 2024-12-02T04:27:59,699 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-02T04:27:59,699 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:27:59,699 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:27:59,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T04:27:59,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-02T04:27:59,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-02T04:27:59,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T04:27:59,700 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-02T04:27:59,700 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-02T04:27:59,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-02T04:27:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:27:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-02T04:27:59,707 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113679707"}]},"ts":"1733113679707"} 2024-12-02T04:27:59,709 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-02T04:27:59,709 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-02T04:27:59,710 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-02T04:27:59,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, UNASSIGN}] 2024-12-02T04:27:59,711 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, UNASSIGN 2024-12-02T04:27:59,711 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, UNASSIGN 2024-12-02T04:27:59,712 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=CLOSING, regionLocation=e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:27:59,712 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=ecde63e4e4de7e6bec0f1bffc7ef31bd, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:27:59,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, UNASSIGN because future has completed 2024-12-02T04:27:59,714 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:27:59,714 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:27:59,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, UNASSIGN because future has completed 2024-12-02T04:27:59,715 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:27:59,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,37413,1733113342217}] 2024-12-02T04:27:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-02T04:27:59,866 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 
{event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(122): Close 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:59,866 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(122): Close ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:27:59,866 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:27:59,866 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing 428273f3ac5f77d1702cb19f79dac3a4, disabling compactions & flushes 2024-12-02T04:27:59,867 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing ecde63e4e4de7e6bec0f1bffc7ef31bd, disabling compactions & flushes 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:59,867 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. after waiting 0 ms 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. after waiting 0 ms 2024-12-02T04:27:59,867 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 
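The "Export Completed: snaptb0-testExportWithChecksum" and TestExportSnapshot verification entries above correspond to running the ExportSnapshot tool against a second HDFS root. A minimal driver sketch, assuming only the tool's documented -snapshot/-copy-to options (class name and destination path taken from the log; this is not the test's actual code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver: copies the snapshot to the export root that the
// "Export Completed: snaptb0-testExportWithChecksum" entry above refers to.
public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        // destination root used by the test run logged above
        "-copy-to", "hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113658863"
    });
    System.exit(rc);
  }
}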
2024-12-02T04:27:59,871 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-12-02T04:27:59,871 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:27:59,872 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:27:59,872 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4. 2024-12-02T04:27:59,872 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for 428273f3ac5f77d1702cb19f79dac3a4: Waiting for close lock at 1733113679867Running coprocessor pre-close hooks at 1733113679867Disabling compacts and flushes for region at 1733113679867Disabling writes for close at 1733113679867Writing region close event to WAL at 1733113679868 (+1 ms)Running coprocessor post-close hooks at 1733113679872 (+4 ms)Closed at 1733113679872 2024-12-02T04:27:59,872 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:27:59,872 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd. 
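The surrounding entries trace DisableTableProcedure pid=227 unassigning and closing the table's two regions, followed below by DeleteTableProcedure pid=233. A minimal client-side sketch that would produce this kind of trail, assuming the standard HBase Admin API (not the test's actual code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical cleanup sketch: disable and drop the test table, which on the
// master side appears as DisableTableProcedure / DeleteTableProcedure entries.
public class DropTestTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);  // pid=227 above: regions are unassigned and closed
      }
      admin.deleteTable(table);     // pid=233 below: region dirs archived, META rows removed
    }
  }
}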
2024-12-02T04:27:59,872 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for ecde63e4e4de7e6bec0f1bffc7ef31bd: Waiting for close lock at 1733113679867Running coprocessor pre-close hooks at 1733113679867Disabling compacts and flushes for region at 1733113679867Disabling writes for close at 1733113679867Writing region close event to WAL at 1733113679868 (+1 ms)Running coprocessor post-close hooks at 1733113679872 (+4 ms)Closed at 1733113679872 2024-12-02T04:27:59,874 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed 428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:27:59,876 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=428273f3ac5f77d1702cb19f79dac3a4, regionState=CLOSED 2024-12-02T04:27:59,876 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:27:59,879 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=ecde63e4e4de7e6bec0f1bffc7ef31bd, regionState=CLOSED 2024-12-02T04:27:59,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,37413,1733113342217 because future has completed 2024-12-02T04:27:59,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:27:59,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=229 2024-12-02T04:27:59,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure 428273f3ac5f77d1702cb19f79dac3a4, server=e2eaa0f11f7e,37413,1733113342217 in 167 msec 2024-12-02T04:27:59,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-02T04:27:59,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure ecde63e4e4de7e6bec0f1bffc7ef31bd, server=e2eaa0f11f7e,34291,1733113342258 in 170 msec 2024-12-02T04:27:59,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=428273f3ac5f77d1702cb19f79dac3a4, UNASSIGN in 175 msec 2024-12-02T04:27:59,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=230, resume processing ppid=228 2024-12-02T04:27:59,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ecde63e4e4de7e6bec0f1bffc7ef31bd, UNASSIGN in 177 msec 2024-12-02T04:27:59,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-02T04:27:59,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 183 msec 2024-12-02T04:27:59,899 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113679899"}]},"ts":"1733113679899"} 2024-12-02T04:27:59,902 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-02T04:27:59,902 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-02T04:27:59,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 198 msec 2024-12-02T04:28:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-02T04:28:00,028 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T04:28:00,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-02T04:28:00,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:28:00,030 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:28:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-02T04:28:00,031 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:28:00,033 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-02T04:28:00,036 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:28:00,037 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:28:00,038 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/recovered.edits] 2024-12-02T04:28:00,038 DEBUG [HFileArchiver-25 {}] 
backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/recovered.edits] 2024-12-02T04:28:00,044 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/4224f7e12588427498bb6912d6a5b1e6 2024-12-02T04:28:00,046 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/cf/994fab0933c04e3f8b0c18c8de4c0e58 2024-12-02T04:28:00,047 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/recovered.edits/12.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/recovered.edits/12.seqid 2024-12-02T04:28:00,047 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:28:00,048 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd/recovered.edits/9.seqid 2024-12-02T04:28:00,048 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportWithChecksum/ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:28:00,048 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-02T04:28:00,049 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-02T04:28:00,049 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-02T04:28:00,052 
DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202cab75be6eab549dab7d227d59765c238_ecde63e4e4de7e6bec0f1bffc7ef31bd 2024-12-02T04:28:00,053 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029a1cbfed07244905a32c4a7d2d48a5a8_428273f3ac5f77d1702cb19f79dac3a4 2024-12-02T04:28:00,054 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-02T04:28:00,056 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:28:00,058 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-02T04:28:00,061 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-02T04:28:00,062 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:28:00,062 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 
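The HFileArchiver entries above show DeleteTableProcedure moving store files (including the MOB files) under archive/ rather than deleting them outright. A hypothetical check of one archived path copied from the log, assuming only the Hadoop FileSystem API (not part of the test itself):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical verification: the HFile archived above should now exist under archive/.
public class ArchivedHFileCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path archived = new Path("hdfs://localhost:34539/user/jenkins/test-data/"
        + "0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/"
        + "testtb-testExportWithChecksum/428273f3ac5f77d1702cb19f79dac3a4/cf/"
        + "4224f7e12588427498bb6912d6a5b1e6");
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34539"), conf)) {
      System.out.println("archived copy present: " + fs.exists(archived));
    }
  }
}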
2024-12-02T04:28:00,063 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113680062"}]},"ts":"9223372036854775807"} 2024-12-02T04:28:00,063 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113680062"}]},"ts":"9223372036854775807"} 2024-12-02T04:28:00,065 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:28:00,065 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 428273f3ac5f77d1702cb19f79dac3a4, NAME => 'testtb-testExportWithChecksum,,1733113615947.428273f3ac5f77d1702cb19f79dac3a4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ecde63e4e4de7e6bec0f1bffc7ef31bd, NAME => 'testtb-testExportWithChecksum,1,1733113615947.ecde63e4e4de7e6bec0f1bffc7ef31bd.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:28:00,065 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-02T04:28:00,065 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113680065"}]},"ts":"9223372036854775807"} 2024-12-02T04:28:00,068 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-02T04:28:00,069 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T04:28:00,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 41 msec 2024-12-02T04:28:00,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T04:28:00,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithChecksum with data PBUF 2024-12-02T04:28:00,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T04:28:00,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T04:28:00,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T04:28:00,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-02T04:28:00,150 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-02T04:28:00,150 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T04:28:00,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-02T04:28:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-02T04:28:00,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-02T04:28:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-02T04:28:00,190 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=827 (was 820) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_118587687_1 at /127.0.0.1:35512 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:35524 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 156088) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) 
app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37255 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:46930 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8951 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:37255 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ContainersLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_118587687_1 at /127.0.0.1:46916 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:55822 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=825 (was 823) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=903 (was 643) - SystemLoadAverage LEAK? -, ProcessCount=25 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=1743 (was 2339) 2024-12-02T04:28:00,190 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=827 is superior to 500 2024-12-02T04:28:00,214 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=827, OpenFileDescriptor=825, MaxFileDescriptor=1048576, SystemLoadAverage=903, ProcessCount=25, AvailableMemoryMB=1740 2024-12-02T04:28:00,214 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=827 is superior to 500 2024-12-02T04:28:00,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T04:28:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:00,219 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T04:28:00,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-12-02T04:28:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-02T04:28:00,220 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T04:28:00,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742412_1588 (size=454) 2024-12-02T04:28:00,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742412_1588 (size=454) 2024-12-02T04:28:00,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742412_1588 (size=454) 2024-12-02T04:28:00,238 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6ee5bdca37b5c745aa7802418d52884d, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:28:00,238 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b98dc94d1f146db1a2905fcdb5b335f9, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:28:00,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742414_1590 (size=79) 2024-12-02T04:28:00,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742414_1590 (size=79) 2024-12-02T04:28:00,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742414_1590 (size=79) 2024-12-02T04:28:00,258 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:28:00,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742413_1589 (size=79) 2024-12-02T04:28:00,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing b98dc94d1f146db1a2905fcdb5b335f9, disabling compactions & flushes 2024-12-02T04:28:00,259 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 
after waiting 0 ms 2024-12-02T04:28:00,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,259 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for b98dc94d1f146db1a2905fcdb5b335f9: Waiting for close lock at 1733113680258Disabling compacts and flushes for region at 1733113680258Disabling writes for close at 1733113680259 (+1 ms)Writing region close event to WAL at 1733113680259Closed at 1733113680259 2024-12-02T04:28:00,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742413_1589 (size=79) 2024-12-02T04:28:00,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742413_1589 (size=79) 2024-12-02T04:28:00,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:28:00,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 6ee5bdca37b5c745aa7802418d52884d, disabling compactions & flushes 2024-12-02T04:28:00,260 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:00,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:00,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. after waiting 0 ms 2024-12-02T04:28:00,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:00,260 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 
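The create request logged above (pid=234) builds 'testtb-testExportFileSystemStateWithSkipTmp' with a single MOB-enabled family 'cf' (MOB_THRESHOLD => '0', VERSIONS => '1') and one split point at '1', which is what yields the two regions 6ee5bdca37b5c745aa7802418d52884d and b98dc94d1f146db1a2905fcdb5b335f9 being initialized and closed during CREATE_TABLE_WRITE_FS_LAYOUT. A minimal client-side sketch of an equivalent Admin call is shown below; this is not the test's own code, and the class name and configuration source are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          // table metadata seen in the log: METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}
          .setValue("hbase.store.file-tracker.impl", "DEFAULT")
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              // MOB threshold 0 forces every cell in 'cf' through the MOB path
              // (IS_MOB => 'true', MOB_THRESHOLD => '0' above)
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)                         // VERSIONS => '1'
              .build());
      // One split key '1' gives the two regions seen above: [,1) and [1,).
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}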
2024-12-02T04:28:00,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6ee5bdca37b5c745aa7802418d52884d: Waiting for close lock at 1733113680260Disabling compacts and flushes for region at 1733113680260Disabling writes for close at 1733113680260Writing region close event to WAL at 1733113680260Closed at 1733113680260 2024-12-02T04:28:00,262 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T04:28:00,262 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733113680262"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113680262"}]},"ts":"1733113680262"} 2024-12-02T04:28:00,263 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733113680262"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733113680262"}]},"ts":"1733113680262"} 2024-12-02T04:28:00,267 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T04:28:00,269 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T04:28:00,269 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113680269"}]},"ts":"1733113680269"} 2024-12-02T04:28:00,271 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-02T04:28:00,272 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T04:28:00,273 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T04:28:00,273 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T04:28:00,273 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T04:28:00,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T04:28:00,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, 
ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, ASSIGN}] 2024-12-02T04:28:00,276 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, ASSIGN 2024-12-02T04:28:00,276 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, ASSIGN 2024-12-02T04:28:00,278 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,41643,1733113342088; forceNewPlan=false, retain=false 2024-12-02T04:28:00,278 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,34291,1733113342258; forceNewPlan=false, retain=false 2024-12-02T04:28:00,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-02T04:28:00,429 INFO [e2eaa0f11f7e:34209 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
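The repeated "Checking to see if procedure is done pid=234" lines come from the client blocking until the CreateTableProcedure completes. A hedged sketch of the non-blocking variant is below; the helper name is hypothetical and the descriptor is taken as a parameter rather than rebuilt here.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

final class AsyncCreateSketch {
  // Hypothetical helper: createTableAsync returns a Future that completes when the
  // CreateTableProcedure finishes; the blocking createTable form is what produces the
  // periodic "Checking to see if procedure is done" polls in this log.
  static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
    admin.createTableAsync(td, new byte[][] { Bytes.toBytes("1") })
         .get(60, TimeUnit.SECONDS);   // bounded wait, mirroring the test utility's 60s waits
  }
}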
2024-12-02T04:28:00,429 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=6ee5bdca37b5c745aa7802418d52884d, regionState=OPENING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:28:00,429 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=b98dc94d1f146db1a2905fcdb5b335f9, regionState=OPENING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:28:00,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, ASSIGN because future has completed 2024-12-02T04:28:00,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ee5bdca37b5c745aa7802418d52884d, server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:28:00,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, ASSIGN because future has completed 2024-12-02T04:28:00,432 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:28:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-02T04:28:00,586 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:00,586 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => 6ee5bdca37b5c745aa7802418d52884d, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T04:28:00,586 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. service=AccessControlService 2024-12-02T04:28:00,587 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:28:00,587 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,587 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:28:00,587 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,587 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,590 INFO [StoreOpener-6ee5bdca37b5c745aa7802418d52884d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,590 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,590 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => b98dc94d1f146db1a2905fcdb5b335f9, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T04:28:00,591 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. service=AccessControlService 2024-12-02T04:28:00,591 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T04:28:00,591 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,591 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T04:28:00,591 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,591 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,591 INFO [StoreOpener-6ee5bdca37b5c745aa7802418d52884d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ee5bdca37b5c745aa7802418d52884d columnFamilyName cf 2024-12-02T04:28:00,593 DEBUG [StoreOpener-6ee5bdca37b5c745aa7802418d52884d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:28:00,593 INFO [StoreOpener-b98dc94d1f146db1a2905fcdb5b335f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,594 INFO [StoreOpener-6ee5bdca37b5c745aa7802418d52884d-1 {}] regionserver.HStore(327): Store=6ee5bdca37b5c745aa7802418d52884d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:28:00,594 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,594 INFO [StoreOpener-b98dc94d1f146db1a2905fcdb5b335f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output 
for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b98dc94d1f146db1a2905fcdb5b335f9 columnFamilyName cf 2024-12-02T04:28:00,595 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,595 DEBUG [StoreOpener-b98dc94d1f146db1a2905fcdb5b335f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:28:00,595 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,595 INFO [StoreOpener-b98dc94d1f146db1a2905fcdb5b335f9-1 {}] regionserver.HStore(327): Store=b98dc94d1f146db1a2905fcdb5b335f9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T04:28:00,595 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,595 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,596 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,596 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,598 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,599 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,599 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,599 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,601 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for b98dc94d1f146db1a2905fcdb5b335f9 
2024-12-02T04:28:00,602 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:28:00,603 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T04:28:00,604 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened b98dc94d1f146db1a2905fcdb5b335f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67681876, jitterRate=0.008538544178009033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:28:00,604 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,604 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for b98dc94d1f146db1a2905fcdb5b335f9: Running coprocessor pre-open hook at 1733113680591Writing region info on filesystem at 1733113680591Initializing all the Stores at 1733113680593 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113680593Cleaning up temporary data from old regions at 1733113680599 (+6 ms)Running coprocessor post-open hooks at 1733113680604 (+5 ms)Region opened successfully at 1733113680604 2024-12-02T04:28:00,606 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9., pid=238, masterSystemTime=1733113680584 2024-12-02T04:28:00,606 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened 6ee5bdca37b5c745aa7802418d52884d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63948174, jitterRate=-0.04709795117378235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T04:28:00,606 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,606 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for 6ee5bdca37b5c745aa7802418d52884d: Running coprocessor pre-open hook at 1733113680587Writing 
region info on filesystem at 1733113680587Initializing all the Stores at 1733113680588 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733113680588Cleaning up temporary data from old regions at 1733113680595 (+7 ms)Running coprocessor post-open hooks at 1733113680606 (+11 ms)Region opened successfully at 1733113680606 2024-12-02T04:28:00,607 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d., pid=237, masterSystemTime=1733113680583 2024-12-02T04:28:00,608 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,608 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:00,609 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=b98dc94d1f146db1a2905fcdb5b335f9, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:28:00,609 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:00,609 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 
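With both regions now open (pids 237 and 238), a client can enumerate them the same way the "Found 2 regions for table ..." check further below does. A small sketch follows; the helper name is hypothetical.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class ListRegionsSketch {
  // Hypothetical helper: list the freshly opened regions and their hosting servers,
  // equivalent in effect to the meta scan HBaseTestingUtil performs below.
  static void printRegions(Connection conn, TableName table) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}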
2024-12-02T04:28:00,610 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=6ee5bdca37b5c745aa7802418d52884d, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:28:00,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:28:00,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ee5bdca37b5c745aa7802418d52884d, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:28:00,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=236 2024-12-02T04:28:00,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9, server=e2eaa0f11f7e,34291,1733113342258 in 184 msec 2024-12-02T04:28:00,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=235 2024-12-02T04:28:00,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure 6ee5bdca37b5c745aa7802418d52884d, server=e2eaa0f11f7e,41643,1733113342088 in 187 msec 2024-12-02T04:28:00,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, ASSIGN in 346 msec 2024-12-02T04:28:00,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=234 2024-12-02T04:28:00,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, ASSIGN in 347 msec 2024-12-02T04:28:00,624 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T04:28:00,625 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113680624"}]},"ts":"1733113680624"} 2024-12-02T04:28:00,626 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-02T04:28:00,627 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T04:28:00,627 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-02T04:28:00,631 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T04:28:00,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:00,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,687 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 464 msec 2024-12-02T04:28:00,687 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:00,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-02T04:28:00,849 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T04:28:00,849 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:00,856 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:00,856 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:00,856 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:28:00,858 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:00,863 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:00,867 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:00,869 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T04:28:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113680869 (current time:1733113680869). 
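The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }) maps to a single Admin call. A minimal sketch, with a hypothetical helper name and not the test's own code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

final class SnapshotSketch {
  // Hypothetical helper: a FLUSH-type snapshot request like the one above; the blocking
  // call returns once the master-side SnapshotProcedure (pid=239 below) completes.
  static void takeSnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
        SnapshotType.FLUSH);
  }
}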
2024-12-02T04:28:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:28:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-02T04:28:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:28:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30e0a223, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:28:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:28:00,871 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:28:00,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:28:00,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:28:00,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ecbd84d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:00,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:28:00,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:28:00,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:00,872 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:28:00,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64326b52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:00,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:28:00,873 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:28:00,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:28:00,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:28:00,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:28:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:28:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:00,875 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
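The call stack above shows snapshot validation checking whether security (the hbase:acl table) is available; just below, writeAclToSnapshotDescription reads the table's "jenkins: RWXCA" entry so it can be carried into the snapshot description. That entry was written automatically for the table creator by the AccessController coprocessor at create time; an equivalent explicit grant would look roughly like this sketch, where the helper name and the table-wide scope are assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

final class GrantSketch {
  // Hypothetical sketch: grant the full RWXCA set to 'jenkins' on the test table,
  // matching the acl entry read back from hbase:acl in this log.
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
        "jenkins", null, null,   // null family/qualifier: permission applies to the whole table
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}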
2024-12-02T04:28:00,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de67fa2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:00,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:28:00,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:28:00,876 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:28:00,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:28:00,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:28:00,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28a4990b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:00,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:28:00,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:28:00,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:00,877 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:28:00,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53b265ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:00,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:28:00,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:28:00,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:28:00,880 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35760, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T04:28:00,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:28:00,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:28:00,882 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35078, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:28:00,883 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 2024-12-02T04:28:00,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:28:00,883 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:00,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:00,883 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:28:00,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T04:28:00,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T04:28:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T04:28:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-02T04:28:00,885 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:28:00,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T04:28:00,886 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:28:00,888 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:28:00,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742415_1591 (size=203) 2024-12-02T04:28:00,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742415_1591 (size=203) 2024-12-02T04:28:00,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742415_1591 (size=203) 2024-12-02T04:28:00,894 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:28:00,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9}] 2024-12-02T04:28:00,894 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:00,894 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:00,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T04:28:01,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-02T04:28:01,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for b98dc94d1f146db1a2905fcdb5b335f9: 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for 6ee5bdca37b5c745aa7802418d52884d: 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:28:01,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:28:01,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:28:01,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T04:28:01,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742417_1593 (size=82) 2024-12-02T04:28:01,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742417_1593 (size=82) 2024-12-02T04:28:01,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742417_1593 (size=82) 2024-12-02T04:28:01,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 
2024-12-02T04:28:01,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-02T04:28:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-02T04:28:01,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,056 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9 in 163 msec 2024-12-02T04:28:01,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742416_1592 (size=82) 2024-12-02T04:28:01,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742416_1592 (size=82) 2024-12-02T04:28:01,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742416_1592 (size=82) 2024-12-02T04:28:01,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 
2024-12-02T04:28:01,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-02T04:28:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-02T04:28:01,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,064 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-02T04:28:01,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d in 171 msec 2024-12-02T04:28:01,067 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:28:01,067 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:28:01,068 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T04:28:01,068 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:28:01,068 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:28:01,069 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T04:28:01,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742418_1594 (size=74) 2024-12-02T04:28:01,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742418_1594 (size=74) 2024-12-02T04:28:01,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742418_1594 (size=74) 2024-12-02T04:28:01,076 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:28:01,076 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,077 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742419_1595 (size=697) 2024-12-02T04:28:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742419_1595 (size=697) 2024-12-02T04:28:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742419_1595 (size=697) 2024-12-02T04:28:01,087 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:28:01,091 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:28:01,091 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,093 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:28:01,093 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-02T04:28:01,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 209 msec 2024-12-02T04:28:01,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T04:28:01,199 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T04:28:01,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41643 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:28:01,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34291 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T04:28:01,211 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:01,213 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,213 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 
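The entries above trace a complete master-side SnapshotProcedure for the FLUSH-type snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp, and the entries below repeat the same cycle for snaptb0-testExportFileSystemStateWithSkipTmp. A minimal client-side sketch of the call that triggers such a request follows; it is illustrative only (not part of the captured log) and assumes a client Configuration pointing at this mini-cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch only -- not taken from the test code in this log.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // A FLUSH-type snapshot flushes each region's memstore and then references
      // the resulting store files, matching the SNAPSHOT_PREPARE ...
      // SNAPSHOT_COMPLETE_SNAPSHOT states traced above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH);
    }
  }
}
```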
2024-12-02T04:28:01,213 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T04:28:01,214 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:01,218 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:01,223 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T04:28:01,225 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733113681225 (current time:1733113681225). 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dacc3da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:28:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:28:01,226 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:28:01,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:28:01,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:28:01,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73a24e88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:01,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:28:01,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:28:01,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:01,227 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:28:01,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce6d6ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:28:01,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:28:01,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:28:01,230 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35764, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:28:01,230 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:28:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:28:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:01,231 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:28:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37aa1c40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34209,-1 for getting cluster id 2024-12-02T04:28:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T04:28:01,232 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbdb8f4d-d324-4e0c-81d0-e4860d09061c' 2024-12-02T04:28:01,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T04:28:01,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbdb8f4d-d324-4e0c-81d0-e4860d09061c" 2024-12-02T04:28:01,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f90cf1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:01,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [e2eaa0f11f7e,34209,-1] 2024-12-02T04:28:01,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T04:28:01,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:01,233 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T04:28:01,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@676ca063, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T04:28:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T04:28:01,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,34291,1733113342258, seqNum=-1] 2024-12-02T04:28:01,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:28:01,235 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35772, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:28:01,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., hostname=e2eaa0f11f7e,41643,1733113342088, seqNum=2] 2024-12-02T04:28:01,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T04:28:01,238 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T04:28:01,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209. 
2024-12-02T04:28:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor258.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T04:28:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:01,239 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:28:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T04:28:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
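The "Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]" entries and the writeAclToSnapshotDescription frames in the call stack above reflect the table permissions stored in hbase:acl being copied into the snapshot description. A grant of that shape could be issued roughly as sketched below; this is illustrative only and assumes the AccessController coprocessor is enabled on the cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantExample {
  public static void main(String[] args) throws Throwable {
    // Illustrative sketch only -- not part of the captured log.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN on the whole table
      // (no family/qualifier restriction), granted to the "jenkins" user.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```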
2024-12-02T04:28:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T04:28:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-02T04:28:01,241 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T04:28:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T04:28:01,242 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T04:28:01,244 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T04:28:01,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742420_1596 (size=198) 2024-12-02T04:28:01,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742420_1596 (size=198) 2024-12-02T04:28:01,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742420_1596 (size=198) 2024-12-02T04:28:01,251 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T04:28:01,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9}] 2024-12-02T04:28:01,252 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,252 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T04:28:01,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-02T04:28:01,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34291 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-02T04:28:01,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:01,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:01,406 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing 6ee5bdca37b5c745aa7802418d52884d 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-02T04:28:01,406 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing b98dc94d1f146db1a2905fcdb5b335f9 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-02T04:28:01,423 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-02T04:28:01,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d is 71, key is 0050c8c9bc0fb2df95a35388420cc0cf/cf:q/1733113681209/Put/seqid=0 2024-12-02T04:28:01,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9 is 71, key is 127afd18c2569ccc8d2131a89f968124/cf:q/1733113681210/Put/seqid=0 2024-12-02T04:28:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742421_1597 (size=5311) 2024-12-02T04:28:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742422_1598 (size=7962) 2024-12-02T04:28:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742421_1597 (size=5311) 2024-12-02T04:28:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38235 is added to blk_1073742422_1598 (size=7962) 2024-12-02T04:28:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742421_1597 (size=5311) 2024-12-02T04:28:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742422_1598 (size=7962) 2024-12-02T04:28:01,431 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:28:01,431 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:28:01,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/.tmp/cf/8f518f23f83a42c0958c8736f7b07732, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=b98dc94d1f146db1a2905fcdb5b335f9] 2024-12-02T04:28:01,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/.tmp/cf/27fbabbac25a4df680c7e6f2fe040084, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=6ee5bdca37b5c745aa7802418d52884d] 2024-12-02T04:28:01,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/.tmp/cf/8f518f23f83a42c0958c8736f7b07732 is 220, key is 11a4936b272583af48a151bc0be024b44/cf:q/1733113681210/Put/seqid=0 2024-12-02T04:28:01,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/.tmp/cf/27fbabbac25a4df680c7e6f2fe040084 is 220, key is 02a9adbabacdf9b6320d80fd7cd3b60fa/cf:q/1733113681209/Put/seqid=0 2024-12-02T04:28:01,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742423_1599 (size=6608) 2024-12-02T04:28:01,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742423_1599 (size=6608) 2024-12-02T04:28:01,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742423_1599 (size=6608) 2024-12-02T04:28:01,440 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/.tmp/cf/27fbabbac25a4df680c7e6f2fe040084 2024-12-02T04:28:01,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742424_1600 (size=14879) 2024-12-02T04:28:01,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742424_1600 (size=14879) 2024-12-02T04:28:01,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742424_1600 (size=14879) 2024-12-02T04:28:01,441 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/.tmp/cf/8f518f23f83a42c0958c8736f7b07732 2024-12-02T04:28:01,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/.tmp/cf/27fbabbac25a4df680c7e6f2fe040084 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf/27fbabbac25a4df680c7e6f2fe040084 2024-12-02T04:28:01,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/.tmp/cf/8f518f23f83a42c0958c8736f7b07732 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf/8f518f23f83a42c0958c8736f7b07732 2024-12-02T04:28:01,448 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf/8f518f23f83a42c0958c8736f7b07732, entries=44, sequenceid=6, filesize=14.5 K 2024-12-02T04:28:01,448 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf/27fbabbac25a4df680c7e6f2fe040084, entries=6, sequenceid=6, filesize=6.5 K 2024-12-02T04:28:01,449 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for b98dc94d1f146db1a2905fcdb5b335f9 in 44ms, sequenceid=6, compaction requested=false 2024-12-02T04:28:01,449 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 6ee5bdca37b5c745aa7802418d52884d in 44ms, sequenceid=6, compaction requested=false 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 6ee5bdca37b5c745aa7802418d52884d: 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for b98dc94d1f146db1a2905fcdb5b335f9: 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf/8f518f23f83a42c0958c8736f7b07732] hfiles 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf/27fbabbac25a4df680c7e6f2fe040084] hfiles 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf/8f518f23f83a42c0958c8736f7b07732 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf/27fbabbac25a4df680c7e6f2fe040084 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742425_1601 (size=121) 2024-12-02T04:28:01,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742425_1601 (size=121) 2024-12-02T04:28:01,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742425_1601 (size=121) 2024-12-02T04:28:01,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 
2024-12-02T04:28:01,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-02T04:28:01,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-02T04:28:01,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,463 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9 in 212 msec 2024-12-02T04:28:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742426_1602 (size=121) 2024-12-02T04:28:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742426_1602 (size=121) 2024-12-02T04:28:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742426_1602 (size=121) 2024-12-02T04:28:01,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 
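The mobdir/... paths, the HMobStore FLUSH renames, and the DefaultMobStoreFlusher entries above come from a MOB-enabled column family on the test table. A rough sketch of creating a table of that shape follows; the MOB threshold value is an arbitrary illustration and is not taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableExample {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch only -- not the setup code used by this test run.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
      // Cells larger than the MOB threshold are written to separate MOB files
      // under /mobdir instead of the regular region store files, which is why
      // the flush above produces both a region hfile and a mob hfile.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // bytes; illustrative value
          .build());
      admin.createTable(table.build());
    }
  }
}
```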
2024-12-02T04:28:01,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-02T04:28:01,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-02T04:28:01,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,470 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-02T04:28:01,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6ee5bdca37b5c745aa7802418d52884d in 220 msec 2024-12-02T04:28:01,472 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T04:28:01,473 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T04:28:01,474 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
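Once snaptb0-testExportFileSystemStateWithSkipTmp completes, the ExportSnapshot entries at the end of this section show it being copied to a second HDFS location with skipTmp=true. A sketch of driving that export programmatically is shown below; it is illustrative only, the paths are taken from the log, and the snapshot.export.skip.tmp key is assumed to be the configuration property behind the skipTmp flag.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch only -- not the test driver used in this log.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("snapshot.export.skip.tmp", true);  // assumed key for skipTmp=true
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to",
        "hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558",
        "-mappers", "1"
    });
    System.exit(rc);
  }
}
```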
2024-12-02T04:28:01,474 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T04:28:01,474 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T04:28:01,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d] hfiles 2024-12-02T04:28:01,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:01,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:01,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742427_1603 (size=305) 2024-12-02T04:28:01,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742427_1603 (size=305) 2024-12-02T04:28:01,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742427_1603 (size=305) 2024-12-02T04:28:01,481 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T04:28:01,481 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,481 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742428_1604 (size=1007) 2024-12-02T04:28:01,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added 
to blk_1073742428_1604 (size=1007) 2024-12-02T04:28:01,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742428_1604 (size=1007) 2024-12-02T04:28:01,492 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T04:28:01,496 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T04:28:01,496 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,497 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T04:28:01,497 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-02T04:28:01,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 258 msec 2024-12-02T04:28:01,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T04:28:01,558 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T04:28:01,558 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558 2024-12-02T04:28:01,558 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34539, tgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558, rawTgtDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558, srcFsUri=hdfs://localhost:34539, srcDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:28:01,581 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34539, inputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd 2024-12-02T04:28:01,581 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,582 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T04:28:01,586 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742429_1605 (size=198) 2024-12-02T04:28:01,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742429_1605 (size=198) 2024-12-02T04:28:01,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742429_1605 (size=198) 2024-12-02T04:28:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742430_1606 (size=1007) 2024-12-02T04:28:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742430_1606 (size=1007) 2024-12-02T04:28:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742430_1606 (size=1007) 2024-12-02T04:28:01,595 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:01,595 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:01,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:01,676 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-02T04:28:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-02T04:28:02,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-10540955362186980283.jar 2024-12-02T04:28:02,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop-5953900969562962289.jar 2024-12-02T04:28:02,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T04:28:02,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T04:28:02,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T04:28:02,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T04:28:02,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T04:28:02,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T04:28:02,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T04:28:02,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T04:28:02,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T04:28:02,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T04:28:02,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T04:28:02,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T04:28:02,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
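The TableMapReduceUtil entries above record, for each class the export job references, the jar that gets attached to the MapReduce job's classpath. Below is a minimal sketch of how that dependency wiring is normally done through the public TableMapReduceUtil.addDependencyJars API; the job name is a placeholder, not something taken from this run.

// Sketch only: shows how dependency jars such as those listed above are normally
// attached to a MapReduce job. The job name is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");
    // Resolves the jars containing the job's configured classes (plus HBase internals)
    // and adds them to the job's distributed cache, emitting DEBUG lines like the ones above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
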
2024-12-02T04:28:02,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:28:02,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:28:02,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:28:02,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T04:28:02,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:28:02,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T04:28:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742431_1607 (size=131440) 2024-12-02T04:28:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742431_1607 (size=131440) 2024-12-02T04:28:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742431_1607 (size=131440) 2024-12-02T04:28:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742432_1608 (size=4188619) 2024-12-02T04:28:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742432_1608 (size=4188619) 2024-12-02T04:28:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742432_1608 (size=4188619) 2024-12-02T04:28:02,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742433_1609 (size=1323991) 2024-12-02T04:28:02,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742433_1609 (size=1323991) 2024-12-02T04:28:02,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742433_1609 (size=1323991) 2024-12-02T04:28:02,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38235 is added to blk_1073742434_1610 (size=903927) 2024-12-02T04:28:02,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742434_1610 (size=903927) 2024-12-02T04:28:02,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742434_1610 (size=903927) 2024-12-02T04:28:02,966 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d95ecee29e5f13e9036a6e1711d1a0f3, had cached 0 bytes from a total of 6284 2024-12-02T04:28:02,966 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 37a29921e433fcac8df396b670af8f39, had cached 0 bytes from a total of 14265 2024-12-02T04:28:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742435_1611 (size=8360083) 2024-12-02T04:28:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742435_1611 (size=8360083) 2024-12-02T04:28:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742435_1611 (size=8360083) 2024-12-02T04:28:02,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742436_1612 (size=1877034) 2024-12-02T04:28:02,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742436_1612 (size=1877034) 2024-12-02T04:28:02,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742436_1612 (size=1877034) 2024-12-02T04:28:02,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742437_1613 (size=440957) 2024-12-02T04:28:02,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742437_1613 (size=440957) 2024-12-02T04:28:02,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742437_1613 (size=440957) 2024-12-02T04:28:02,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742438_1614 (size=77835) 2024-12-02T04:28:02,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742438_1614 (size=77835) 2024-12-02T04:28:02,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742438_1614 (size=77835) 2024-12-02T04:28:03,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742439_1615 (size=30949) 2024-12-02T04:28:03,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742439_1615 (size=30949) 2024-12-02T04:28:03,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742439_1615 (size=30949) 2024-12-02T04:28:03,422 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742440_1616 (size=1597213) 2024-12-02T04:28:03,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742440_1616 (size=1597213) 2024-12-02T04:28:03,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742440_1616 (size=1597213) 2024-12-02T04:28:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742441_1617 (size=4695811) 2024-12-02T04:28:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742441_1617 (size=4695811) 2024-12-02T04:28:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742441_1617 (size=4695811) 2024-12-02T04:28:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742442_1618 (size=232957) 2024-12-02T04:28:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742442_1618 (size=232957) 2024-12-02T04:28:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742442_1618 (size=232957) 2024-12-02T04:28:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742443_1619 (size=127628) 2024-12-02T04:28:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742443_1619 (size=127628) 2024-12-02T04:28:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742443_1619 (size=127628) 2024-12-02T04:28:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742444_1620 (size=20406) 2024-12-02T04:28:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742444_1620 (size=20406) 2024-12-02T04:28:03,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742444_1620 (size=20406) 2024-12-02T04:28:03,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742445_1621 (size=5175431) 2024-12-02T04:28:03,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742445_1621 (size=5175431) 2024-12-02T04:28:03,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742445_1621 (size=5175431) 2024-12-02T04:28:03,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742446_1622 (size=6424732) 2024-12-02T04:28:03,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742446_1622 (size=6424732) 
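Each block written during job setup appears three times in the addStoredBlock entries, once per datanode (127.0.0.1:38235, :38471, :40371), which is consistent with a replication factor of 3 in the mini DFS cluster. A hedged sketch of how that replication could be read back through the standard Hadoop FileSystem API follows; the file path is a placeholder.

// Sketch: reading back the replication factor that the block reports above reflect.
// The path below is a placeholder, not one of the test's actual files.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new Path("hdfs://localhost:34539/").toUri(), conf);
    FileStatus st = fs.getFileStatus(new Path("/user/jenkins/some-file"));
    // Expect 3 here, matching the three addStoredBlock entries per block in the log.
    System.out.println("replication=" + st.getReplication());
  }
}
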
2024-12-02T04:28:03,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742446_1622 (size=6424732) 2024-12-02T04:28:03,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742447_1623 (size=217634) 2024-12-02T04:28:03,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742447_1623 (size=217634) 2024-12-02T04:28:03,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742447_1623 (size=217634) 2024-12-02T04:28:03,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742448_1624 (size=1832290) 2024-12-02T04:28:03,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742448_1624 (size=1832290) 2024-12-02T04:28:03,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742448_1624 (size=1832290) 2024-12-02T04:28:03,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742449_1625 (size=322274) 2024-12-02T04:28:03,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742449_1625 (size=322274) 2024-12-02T04:28:03,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742449_1625 (size=322274) 2024-12-02T04:28:03,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742450_1626 (size=503880) 2024-12-02T04:28:03,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742450_1626 (size=503880) 2024-12-02T04:28:03,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742450_1626 (size=503880) 2024-12-02T04:28:03,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742451_1627 (size=29229) 2024-12-02T04:28:03,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742451_1627 (size=29229) 2024-12-02T04:28:03,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742451_1627 (size=29229) 2024-12-02T04:28:03,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742452_1628 (size=24096) 2024-12-02T04:28:03,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742452_1628 (size=24096) 2024-12-02T04:28:03,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742452_1628 (size=24096) 2024-12-02T04:28:03,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742453_1629 
(size=111872) 2024-12-02T04:28:03,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742453_1629 (size=111872) 2024-12-02T04:28:03,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742453_1629 (size=111872) 2024-12-02T04:28:03,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742454_1630 (size=45609) 2024-12-02T04:28:03,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742454_1630 (size=45609) 2024-12-02T04:28:03,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742454_1630 (size=45609) 2024-12-02T04:28:03,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742455_1631 (size=136454) 2024-12-02T04:28:03,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742455_1631 (size=136454) 2024-12-02T04:28:03,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742455_1631 (size=136454) 2024-12-02T04:28:03,979 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T04:28:03,982 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-02T04:28:03,984 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T04:28:03,984 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-02T04:28:03,984 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.5 K 2024-12-02T04:28:03,984 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.2 K 2024-12-02T04:28:03,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742456_1632 (size=1079) 2024-12-02T04:28:03,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742456_1632 (size=1079) 2024-12-02T04:28:03,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742456_1632 (size=1079) 2024-12-02T04:28:03,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742457_1633 (size=35) 2024-12-02T04:28:03,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742457_1633 (size=35) 2024-12-02T04:28:03,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742457_1633 (size=35) 2024-12-02T04:28:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742458_1634 (size=304168) 2024-12-02T04:28:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38235 is added to blk_1073742458_1634 (size=304168) 2024-12-02T04:28:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742458_1634 (size=304168) 2024-12-02T04:28:04,333 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:28:04,333 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T04:28:04,335 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0010_000001 (auth:SIMPLE) from 127.0.0.1:54564 2024-12-02T04:28:04,347 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000001/launch_container.sh] 2024-12-02T04:28:04,347 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000001/container_tokens] 2024-12-02T04:28:04,347 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_0/usercache/jenkins/appcache/application_1733113348655_0010/container_1733113348655_0010_01_000001/sysfs] 2024-12-02T04:28:04,726 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:55036 2024-12-02T04:28:05,394 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:28:08,901 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:52354 2024-12-02T04:28:09,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742459_1635 (size=349890) 2024-12-02T04:28:09,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742459_1635 (size=349890) 2024-12-02T04:28:09,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742459_1635 (size=349890) 2024-12-02T04:28:11,117 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:54574 2024-12-02T04:28:11,117 INFO 
[Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:55050 2024-12-02T04:28:11,993 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:40270 2024-12-02T04:28:11,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:42944 2024-12-02T04:28:14,335 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733113348655_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T04:28:15,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742460_1636 (size=7962) 2024-12-02T04:28:15,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742460_1636 (size=7962) 2024-12-02T04:28:15,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742460_1636 (size=7962) 2024-12-02T04:28:16,107 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000003/launch_container.sh] 2024-12-02T04:28:16,107 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000003/container_tokens] 2024-12-02T04:28:16,107 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000003/sysfs] 2024-12-02T04:28:18,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742462_1638 (size=5311) 2024-12-02T04:28:18,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742462_1638 (size=5311) 2024-12-02T04:28:18,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742462_1638 (size=5311) 2024-12-02T04:28:18,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742463_1639 (size=14879) 2024-12-02T04:28:18,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742463_1639 (size=14879) 2024-12-02T04:28:18,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to 
blk_1073742463_1639 (size=14879) 2024-12-02T04:28:18,307 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000005/launch_container.sh] 2024-12-02T04:28:18,307 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000005/container_tokens] 2024-12-02T04:28:18,307 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_2/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000005/sysfs] 2024-12-02T04:28:18,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742464_1640 (size=6608) 2024-12-02T04:28:18,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742464_1640 (size=6608) 2024-12-02T04:28:18,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742464_1640 (size=6608) 2024-12-02T04:28:18,397 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000002/launch_container.sh] 2024-12-02T04:28:18,397 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000002/container_tokens] 2024-12-02T04:28:18,398 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_3/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000002/sysfs] 2024-12-02T04:28:18,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742461_1637 (size=31803) 2024-12-02T04:28:18,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742461_1637 (size=31803) 2024-12-02T04:28:18,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40371 is added to blk_1073742461_1637 (size=31803) 2024-12-02T04:28:18,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742465_1641 (size=477) 2024-12-02T04:28:18,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742465_1641 (size=477) 2024-12-02T04:28:18,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742465_1641 (size=477) 2024-12-02T04:28:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742466_1642 (size=31803) 2024-12-02T04:28:18,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742466_1642 (size=31803) 2024-12-02T04:28:18,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742466_1642 (size=31803) 2024-12-02T04:28:18,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742467_1643 (size=349890) 2024-12-02T04:28:18,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742467_1643 (size=349890) 2024-12-02T04:28:18,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742467_1643 (size=349890) 2024-12-02T04:28:18,533 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:42960 2024-12-02T04:28:18,542 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:40278 2024-12-02T04:28:18,549 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:40290 2024-12-02T04:28:20,166 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T04:28:20,167 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
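The finalize/verify entries above conclude the export that ExportSnapshot has been driving since the "Copy Snapshot Manifest" step, with skipTmp=true as shown in the earlier outputFs line. Below is a minimal sketch of an equivalent programmatic invocation; the mapper count, the destination path, and the snapshot.export.skip.tmp property name are illustrative assumptions rather than values taken from this run.

// Sketch: invoking the snapshot export shown in this log programmatically.
// The -mappers value, the destination path, and the skip-tmp property name are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property for the "skip .tmp staging dir" behaviour exercised by this test.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:34539/path/to/export-dest",
        "-mappers", "4"
    });
    System.exit(rc);
  }
}
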
2024-12-02T04:28:20,178 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,178 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T04:28:20,179 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T04:28:20,179 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,179 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-02T04:28:20,179 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-02T04:28:20,179 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-655514339_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,179 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-02T04:28:20,179 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/export-test/export-1733113681558/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-02T04:28:20,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-02T04:28:20,200 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113700199"}]},"ts":"1733113700199"} 2024-12-02T04:28:20,203 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-02T04:28:20,203 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-02T04:28:20,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-02T04:28:20,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, UNASSIGN}] 2024-12-02T04:28:20,206 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, UNASSIGN 2024-12-02T04:28:20,206 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, UNASSIGN 2024-12-02T04:28:20,207 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=b98dc94d1f146db1a2905fcdb5b335f9, regionState=CLOSING, regionLocation=e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:28:20,207 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=6ee5bdca37b5c745aa7802418d52884d, regionState=CLOSING, regionLocation=e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:28:20,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, UNASSIGN because future has completed 2024-12-02T04:28:20,208 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:28:20,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9, server=e2eaa0f11f7e,34291,1733113342258}] 2024-12-02T04:28:20,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, UNASSIGN because future has completed 2024-12-02T04:28:20,209 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T04:28:20,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6ee5bdca37b5c745aa7802418d52884d, 
server=e2eaa0f11f7e,41643,1733113342088}] 2024-12-02T04:28:20,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-02T04:28:20,361 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:20,361 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:28:20,361 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing b98dc94d1f146db1a2905fcdb5b335f9, disabling compactions & flushes 2024-12-02T04:28:20,361 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:20,362 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:20,362 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. after waiting 0 ms 2024-12-02T04:28:20,362 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:20,363 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:20,363 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T04:28:20,363 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing 6ee5bdca37b5c745aa7802418d52884d, disabling compactions & flushes 2024-12-02T04:28:20,363 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:20,363 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:20,363 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 
after waiting 0 ms 2024-12-02T04:28:20,363 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:20,369 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:28:20,370 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:20,371 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9. 2024-12-02T04:28:20,371 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for b98dc94d1f146db1a2905fcdb5b335f9: Waiting for close lock at 1733113700361Running coprocessor pre-close hooks at 1733113700361Disabling compacts and flushes for region at 1733113700361Disabling writes for close at 1733113700362 (+1 ms)Writing region close event to WAL at 1733113700363 (+1 ms)Running coprocessor post-close hooks at 1733113700370 (+7 ms)Closed at 1733113700371 (+1 ms) 2024-12-02T04:28:20,373 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:20,374 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=b98dc94d1f146db1a2905fcdb5b335f9, regionState=CLOSED 2024-12-02T04:28:20,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9, server=e2eaa0f11f7e,34291,1733113342258 because future has completed 2024-12-02T04:28:20,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-12-02T04:28:20,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseRegionProcedure b98dc94d1f146db1a2905fcdb5b335f9, server=e2eaa0f11f7e,34291,1733113342258 in 169 msec 2024-12-02T04:28:20,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=b98dc94d1f146db1a2905fcdb5b335f9, UNASSIGN in 175 msec 2024-12-02T04:28:20,382 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T04:28:20,383 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:20,383 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d. 2024-12-02T04:28:20,383 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for 6ee5bdca37b5c745aa7802418d52884d: Waiting for close lock at 1733113700363Running coprocessor pre-close hooks at 1733113700363Disabling compacts and flushes for region at 1733113700363Disabling writes for close at 1733113700363Writing region close event to WAL at 1733113700371 (+8 ms)Running coprocessor post-close hooks at 1733113700383 (+12 ms)Closed at 1733113700383 2024-12-02T04:28:20,384 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed 6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:20,385 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=6ee5bdca37b5c745aa7802418d52884d, regionState=CLOSED 2024-12-02T04:28:20,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6ee5bdca37b5c745aa7802418d52884d, server=e2eaa0f11f7e,41643,1733113342088 because future has completed 2024-12-02T04:28:20,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=247 2024-12-02T04:28:20,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=247, state=SUCCESS, hasLock=false; CloseRegionProcedure 6ee5bdca37b5c745aa7802418d52884d, server=e2eaa0f11f7e,41643,1733113342088 in 179 msec 2024-12-02T04:28:20,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=246 2024-12-02T04:28:20,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6ee5bdca37b5c745aa7802418d52884d, UNASSIGN in 185 msec 2024-12-02T04:28:20,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-02T04:28:20,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 188 msec 2024-12-02T04:28:20,394 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733113700394"}]},"ts":"1733113700394"} 2024-12-02T04:28:20,396 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-02T04:28:20,396 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-02T04:28:20,398 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 202 msec 
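The DisableTableProcedure finishing above, and the DeleteTableProcedure stored just below, are the server-side counterparts of the Admin calls the test issues while tearing the table down. A minimal sketch of that client-side cleanup using the public Admin API follows; connection setup is a placeholder.

// Sketch: the disable + delete sequence reflected by the procedures in this log.
// Connection configuration is a placeholder; the table name is the one from this log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // drives the DISABLE_TABLE_* states logged above
      admin.deleteTable(table);    // drives the DELETE_TABLE_* states that follow
    }
  }
}
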
2024-12-02T04:28:20,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-02T04:28:20,518 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T04:28:20,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,520 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,522 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,524 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41643 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,527 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:20,527 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:20,529 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/recovered.edits] 2024-12-02T04:28:20,530 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf, FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/recovered.edits] 2024-12-02T04:28:20,534 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf/8f518f23f83a42c0958c8736f7b07732 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/cf/8f518f23f83a42c0958c8736f7b07732 2024-12-02T04:28:20,534 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf/27fbabbac25a4df680c7e6f2fe040084 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/cf/27fbabbac25a4df680c7e6f2fe040084 2024-12-02T04:28:20,542 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d/recovered.edits/9.seqid 2024-12-02T04:28:20,542 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/recovered.edits/9.seqid to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9/recovered.edits/9.seqid 2024-12-02T04:28:20,543 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:20,543 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testtb-testExportFileSystemStateWithSkipTmp/6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:20,543 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-02T04:28:20,543 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-02T04:28:20,545 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-02T04:28:20,548 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9 to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241202f06543a35bd246dca278474387ef1724_b98dc94d1f146db1a2905fcdb5b335f9 2024-12-02T04:28:20,550 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d to hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241202418fd122ec634108bba8718ea756b420_6ee5bdca37b5c745aa7802418d52884d 2024-12-02T04:28:20,550 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-02T04:28:20,553 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,556 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-02T04:28:20,559 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-02T04:28:20,560 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,561 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-12-02T04:28:20,561 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113700561"}]},"ts":"9223372036854775807"} 2024-12-02T04:28:20,561 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733113700561"}]},"ts":"9223372036854775807"} 2024-12-02T04:28:20,564 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T04:28:20,564 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6ee5bdca37b5c745aa7802418d52884d, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733113680216.6ee5bdca37b5c745aa7802418d52884d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b98dc94d1f146db1a2905fcdb5b335f9, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733113680216.b98dc94d1f146db1a2905fcdb5b335f9.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T04:28:20,564 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-02T04:28:20,564 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:28:20,564 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733113700564"}]},"ts":"9223372036854775807"} 2024-12-02T04:28:20,566 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-02T04:28:20,567 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 49 msec 2024-12-02T04:28:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T04:28:20,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T04:28:20,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T04:28:20,604 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:20,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:20,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T04:28:20,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-02T04:28:20,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:20,613 DEBUG 
[RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,613 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T04:28:20,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:20,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:20,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T04:28:20,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-02T04:28:20,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-02T04:28:20,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:20,657 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=829 (was 827) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:38312 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 159410) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9813 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:45774 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:40249 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_909415410_1 at /127.0.0.1:47832 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-655514339_22 at /127.0.0.1:47856 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40249 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=816 (was 825), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=845 (was 903), ProcessCount=25 (was 25), AvailableMemoryMB=1555 (was 1740) 2024-12-02T04:28:20,657 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=829 is superior to 500 2024-12-02T04:28:20,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-12-02T04:28:20,666 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e5d7b2d{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T04:28:20,672 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6882bce1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:28:20,672 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:28:20,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e501ac3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T04:28:20,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@676df96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:28:21,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T04:28:23,571 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000004/launch_container.sh] 2024-12-02T04:28:23,571 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000004/container_tokens] 2024-12-02T04:28:23,571 WARN [ContainersLauncher #9 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-1_1/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000004/sysfs] 2024-12-02T04:28:23,898 WARN [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 3, running: 1 2024-12-02T04:28:24,655 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733113348655_0011_000001 (auth:SIMPLE) from 127.0.0.1:34214 2024-12-02T04:28:24,666 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000001/launch_container.sh] 2024-12-02T04:28:24,666 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000001/container_tokens] 2024-12-02T04:28:24,666 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1102581570/yarn-6804244422/MiniMRCluster_1102581570-localDir-nm-0_0/usercache/jenkins/appcache/application_1733113348655_0011/container_1733113348655_0011_01_000001/sysfs] 2024-12-02T04:28:25,895 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:28:37,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b23b55a{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T04:28:37,729 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ede5a0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:28:37,729 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:28:37,729 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c076869{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T04:28:37,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71c771e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:28:47,966 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d95ecee29e5f13e9036a6e1711d1a0f3, had cached 0 bytes from a total of 6284 2024-12-02T04:28:47,967 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 37a29921e433fcac8df396b670af8f39, had cached 0 bytes from a total of 14265 2024-12-02T04:28:50,564 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T04:28:54,738 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T04:28:54,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c8a4{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-02T04:28:54,739 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20feff49{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:28:54,739 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:28:54,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8c8026f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T04:28:54,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@133e01b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:28:54,741 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-02T04:28:54,746 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-02T04:28:54,746 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-02T04:28:54,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741830_1006 (size=1171841) 2024-12-02T04:28:54,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741830_1006 (size=1171841) 2024-12-02T04:28:54,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741830_1006 (size=1171841) 2024-12-02T04:28:54,750 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T04:28:54,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2bdc1d38{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-02T04:28:54,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a8ec6fa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:28:54,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:28:54,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4dba46ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T04:28:54,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f3d9fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:28:54,756 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T04:28:54,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-02T04:28:54,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T04:28:54,756 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T04:28:54,756 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:28:54,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,757 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T04:28:54,757 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T04:28:54,757 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1607255747, stopped=false 2024-12-02T04:28:54,757 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,757 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T04:28:54,757 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2eaa0f11f7e,34209,1733113341303 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:28:54,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:28:54,773 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T04:28:54,774 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T04:28:54,774 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:28:54,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,774 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:28:54,774 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:28:54,774 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:28:54,774 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T04:28:54,775 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'e2eaa0f11f7e,41643,1733113342088' ***** 2024-12-02T04:28:54,775 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,775 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T04:28:54,775 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,37413,1733113342217' ***** 2024-12-02T04:28:54,776 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,776 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T04:28:54,776 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,34291,1733113342258' ***** 2024-12-02T04:28:54,776 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,776 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T04:28:54,776 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T04:28:54,776 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T04:28:54,776 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T04:28:54,776 INFO [RS:0;e2eaa0f11f7e:41643 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T04:28:54,776 INFO [RS:1;e2eaa0f11f7e:37413 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T04:28:54,776 INFO [RS:2;e2eaa0f11f7e:34291 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T04:28:54,776 INFO [RS:0;e2eaa0f11f7e:41643 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T04:28:54,776 INFO [RS:1;e2eaa0f11f7e:37413 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T04:28:54,776 INFO [RS:2;e2eaa0f11f7e:34291 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T04:28:54,776 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T04:28:54,776 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T04:28:54,776 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:28:54,776 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T04:28:54,776 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T04:28:54,776 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(3091): Received CLOSE for d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:28:54,776 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(3091): Received CLOSE for 37a29921e433fcac8df396b670af8f39 2024-12-02T04:28:54,776 INFO [RS:2;e2eaa0f11f7e:34291 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;e2eaa0f11f7e:34291. 
2024-12-02T04:28:54,776 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:28:54,776 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,777 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(3091): Received CLOSE for 1cd3dc0c66d2745722bcc18163a3b02e 2024-12-02T04:28:54,777 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T04:28:54,777 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T04:28:54,777 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T04:28:54,777 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T04:28:54,777 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:28:54,777 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T04:28:54,777 INFO [RS:1;e2eaa0f11f7e:37413 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e2eaa0f11f7e:37413. 
2024-12-02T04:28:54,777 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:28:54,777 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 37a29921e433fcac8df396b670af8f39, disabling compactions & flushes 2024-12-02T04:28:54,777 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:28:54,777 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T04:28:54,777 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. after waiting 0 ms 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T04:28:54,777 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1325): Online Regions={d95ecee29e5f13e9036a6e1711d1a0f3=testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3.} 2024-12-02T04:28:54,777 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 
2024-12-02T04:28:54,777 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T04:28:54,777 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=87.46 KB heapSize=138.27 KB 2024-12-02T04:28:54,777 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1351): Waiting on d95ecee29e5f13e9036a6e1711d1a0f3 2024-12-02T04:28:54,777 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T04:28:54,777 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d95ecee29e5f13e9036a6e1711d1a0f3, disabling compactions & flushes 2024-12-02T04:28:54,778 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:28:54,778 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:28:54,778 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. after waiting 0 ms 2024-12-02T04:28:54,778 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:28:54,778 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:28:54,778 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T04:28:54,778 INFO [RS:0;e2eaa0f11f7e:41643 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2eaa0f11f7e:41643. 
2024-12-02T04:28:54,778 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:28:54,778 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,778 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T04:28:54,778 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1325): Online Regions={37a29921e433fcac8df396b670af8f39=testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39., 1cd3dc0c66d2745722bcc18163a3b02e=hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e.} 2024-12-02T04:28:54,778 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1351): Waiting on 1cd3dc0c66d2745722bcc18163a3b02e, 37a29921e433fcac8df396b670af8f39 2024-12-02T04:28:54,781 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/37a29921e433fcac8df396b670af8f39/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,782 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 
2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 37a29921e433fcac8df396b670af8f39: Waiting for close lock at 1733113734777Running coprocessor pre-close hooks at 1733113734777Disabling compacts and flushes for region at 1733113734777Disabling writes for close at 1733113734777Writing region close event to WAL at 1733113734778 (+1 ms)Running coprocessor post-close hooks at 1733113734782 (+4 ms)Closed at 1733113734782 2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39. 2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1cd3dc0c66d2745722bcc18163a3b02e, disabling compactions & flushes 2024-12-02T04:28:54,782 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. after waiting 0 ms 2024-12-02T04:28:54,782 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:28:54,782 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1cd3dc0c66d2745722bcc18163a3b02e 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-12-02T04:28:54,784 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T04:28:54,787 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/default/testExportExpiredSnapshot/d95ecee29e5f13e9036a6e1711d1a0f3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T04:28:54,787 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,787 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 
2024-12-02T04:28:54,787 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d95ecee29e5f13e9036a6e1711d1a0f3: Waiting for close lock at 1733113734777Running coprocessor pre-close hooks at 1733113734777Disabling compacts and flushes for region at 1733113734777Disabling writes for close at 1733113734778 (+1 ms)Writing region close event to WAL at 1733113734781 (+3 ms)Running coprocessor post-close hooks at 1733113734787 (+6 ms)Closed at 1733113734787 2024-12-02T04:28:54,788 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733113592619.d95ecee29e5f13e9036a6e1711d1a0f3. 2024-12-02T04:28:54,797 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/.tmp/l/5e31a0a28e714758917d6d409ea64090 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733113590161/DeleteFamily/seqid=0 2024-12-02T04:28:54,802 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/info/c45bb27b3da3484babb25a07ecae253a is 173, key is testExportExpiredSnapshot,1,1733113592619.37a29921e433fcac8df396b670af8f39./info:regioninfo/1733113592977/Put/seqid=0 2024-12-02T04:28:54,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742468_1644 (size=5860) 2024-12-02T04:28:54,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742468_1644 (size=5860) 2024-12-02T04:28:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742468_1644 (size=5860) 2024-12-02T04:28:54,803 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/.tmp/l/5e31a0a28e714758917d6d409ea64090 2024-12-02T04:28:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742469_1645 (size=15646) 2024-12-02T04:28:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742469_1645 (size=15646) 2024-12-02T04:28:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742469_1645 (size=15646) 2024-12-02T04:28:54,809 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74.45 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/info/c45bb27b3da3484babb25a07ecae253a 2024-12-02T04:28:54,809 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e31a0a28e714758917d6d409ea64090 2024-12-02T04:28:54,810 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/.tmp/l/5e31a0a28e714758917d6d409ea64090 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/l/5e31a0a28e714758917d6d409ea64090 2024-12-02T04:28:54,813 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e31a0a28e714758917d6d409ea64090 2024-12-02T04:28:54,813 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/l/5e31a0a28e714758917d6d409ea64090, entries=14, sequenceid=31, filesize=5.7 K 2024-12-02T04:28:54,814 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 1cd3dc0c66d2745722bcc18163a3b02e in 32ms, sequenceid=31, compaction requested=false 2024-12-02T04:28:54,817 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/acl/1cd3dc0c66d2745722bcc18163a3b02e/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-12-02T04:28:54,817 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,817 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:28:54,817 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1cd3dc0c66d2745722bcc18163a3b02e: Waiting for close lock at 1733113734782Running coprocessor pre-close hooks at 1733113734782Disabling compacts and flushes for region at 1733113734782Disabling writes for close at 1733113734782Obtaining lock to block concurrent updates at 1733113734782Preparing flush snapshotting stores in 1cd3dc0c66d2745722bcc18163a3b02e at 1733113734782Finished memstore snapshotting hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1733113734783 (+1 ms)Flushing stores of hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 
at 1733113734783Flushing 1cd3dc0c66d2745722bcc18163a3b02e/l: creating writer at 1733113734783Flushing 1cd3dc0c66d2745722bcc18163a3b02e/l: appending metadata at 1733113734796 (+13 ms)Flushing 1cd3dc0c66d2745722bcc18163a3b02e/l: closing flushed file at 1733113734797 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d7950ee: reopening flushed file at 1733113734809 (+12 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 1cd3dc0c66d2745722bcc18163a3b02e in 32ms, sequenceid=31, compaction requested=false at 1733113734814 (+5 ms)Writing region close event to WAL at 1733113734814Running coprocessor post-close hooks at 1733113734817 (+3 ms)Closed at 1733113734817 2024-12-02T04:28:54,817 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733113345061.1cd3dc0c66d2745722bcc18163a3b02e. 2024-12-02T04:28:54,825 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/ns/24ff609b8ea34bd7913ebcc20c8117f0 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4./ns:/1733113590186/DeleteFamily/seqid=0 2024-12-02T04:28:54,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742470_1646 (size=8378) 2024-12-02T04:28:54,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742470_1646 (size=8378) 2024-12-02T04:28:54,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742470_1646 (size=8378) 2024-12-02T04:28:54,830 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/ns/24ff609b8ea34bd7913ebcc20c8117f0 2024-12-02T04:28:54,845 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T04:28:54,845 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T04:28:54,847 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/rep_barrier/a136e8a10dbc4584a916947b29dbd0c1 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4./rep_barrier:/1733113590186/DeleteFamily/seqid=0 2024-12-02T04:28:54,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742471_1647 (size=8717) 2024-12-02T04:28:54,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742471_1647 (size=8717) 2024-12-02T04:28:54,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to 
blk_1073742471_1647 (size=8717) 2024-12-02T04:28:54,852 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/rep_barrier/a136e8a10dbc4584a916947b29dbd0c1 2024-12-02T04:28:54,862 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T04:28:54,866 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T04:28:54,869 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/table/f62a3f114e024dc2b5bd087ff7400801 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733113571043.8cda44f7fdac1fc2fef17d51390bffd4./table:/1733113590186/DeleteFamily/seqid=0 2024-12-02T04:28:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742472_1648 (size=9531) 2024-12-02T04:28:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742472_1648 (size=9531) 2024-12-02T04:28:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742472_1648 (size=9531) 2024-12-02T04:28:54,873 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/table/f62a3f114e024dc2b5bd087ff7400801 2024-12-02T04:28:54,876 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/info/c45bb27b3da3484babb25a07ecae253a as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/info/c45bb27b3da3484babb25a07ecae253a 2024-12-02T04:28:54,879 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/info/c45bb27b3da3484babb25a07ecae253a, entries=84, sequenceid=240, filesize=15.3 K 2024-12-02T04:28:54,880 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/ns/24ff609b8ea34bd7913ebcc20c8117f0 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/ns/24ff609b8ea34bd7913ebcc20c8117f0 2024-12-02T04:28:54,883 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/ns/24ff609b8ea34bd7913ebcc20c8117f0, entries=28, sequenceid=240, filesize=8.2 K 2024-12-02T04:28:54,884 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/rep_barrier/a136e8a10dbc4584a916947b29dbd0c1 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/rep_barrier/a136e8a10dbc4584a916947b29dbd0c1 2024-12-02T04:28:54,888 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/rep_barrier/a136e8a10dbc4584a916947b29dbd0c1, entries=26, sequenceid=240, filesize=8.5 K 2024-12-02T04:28:54,889 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/.tmp/table/f62a3f114e024dc2b5bd087ff7400801 as hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/table/f62a3f114e024dc2b5bd087ff7400801 2024-12-02T04:28:54,892 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/table/f62a3f114e024dc2b5bd087ff7400801, entries=43, sequenceid=240, filesize=9.3 K 2024-12-02T04:28:54,893 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~87.46 KB/89560, heapSize ~138.20 KB/141520, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=240, compaction requested=false 2024-12-02T04:28:54,898 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/data/hbase/meta/1588230740/recovered.edits/243.seqid, newMaxSeqId=243, maxSeqId=1 2024-12-02T04:28:54,899 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:28:54,899 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T04:28:54,899 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T04:28:54,899 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733113734777Running coprocessor pre-close hooks at 1733113734777Disabling compacts and flushes for region at 1733113734777Disabling writes for close at 1733113734777Obtaining lock to block concurrent updates at 1733113734777Preparing flush snapshotting stores in 1588230740 at 1733113734777Finished memstore snapshotting hbase:meta,,1.1588230740, 
syncing WAL and waiting on mvcc, flushsize=dataSize=89560, getHeapSize=141520, getOffHeapSize=0, getCellsCount=676 at 1733113734778 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733113734779 (+1 ms)Flushing 1588230740/info: creating writer at 1733113734779Flushing 1588230740/info: appending metadata at 1733113734801 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733113734801Flushing 1588230740/ns: creating writer at 1733113734813 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733113734825 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733113734825Flushing 1588230740/rep_barrier: creating writer at 1733113734833 (+8 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733113734847 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733113734847Flushing 1588230740/table: creating writer at 1733113734854 (+7 ms)Flushing 1588230740/table: appending metadata at 1733113734868 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733113734868Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cb2bc55: reopening flushed file at 1733113734876 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34103f24: reopening flushed file at 1733113734879 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5449ed0f: reopening flushed file at 1733113734884 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fb5d9e6: reopening flushed file at 1733113734888 (+4 ms)Finished flush of dataSize ~87.46 KB/89560, heapSize ~138.20 KB/141520, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=240, compaction requested=false at 1733113734893 (+5 ms)Writing region close event to WAL at 1733113734895 (+2 ms)Running coprocessor post-close hooks at 1733113734899 (+4 ms)Closed at 1733113734899 2024-12-02T04:28:54,899 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T04:28:54,911 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T04:28:54,911 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T04:28:54,918 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T04:28:54,918 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T04:28:54,978 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,37413,1733113342217; all regions closed. 2024-12-02T04:28:54,978 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,34291,1733113342258; all regions closed. 2024-12-02T04:28:54,978 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,41643,1733113342088; all regions closed. 
2024-12-02T04:28:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741834_1010 (size=20712) 2024-12-02T04:28:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741834_1010 (size=20712) 2024-12-02T04:28:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741835_1011 (size=13806) 2024-12-02T04:28:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741834_1010 (size=20712) 2024-12-02T04:28:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741836_1012 (size=101978) 2024-12-02T04:28:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741835_1011 (size=13806) 2024-12-02T04:28:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741836_1012 (size=101978) 2024-12-02T04:28:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741835_1011 (size=13806) 2024-12-02T04:28:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741836_1012 (size=101978) 2024-12-02T04:28:54,989 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs 2024-12-02T04:28:54,989 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs 2024-12-02T04:28:54,989 INFO [RS:0;e2eaa0f11f7e:41643 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C41643%2C1733113342088:(num 1733113344053) 2024-12-02T04:28:54,989 INFO [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C34291%2C1733113342258.meta:.meta(num 1733113344442) 2024-12-02T04:28:54,989 DEBUG [RS:0;e2eaa0f11f7e:41643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,989 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T04:28:54,989 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T04:28:54,989 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T04:28:54,990 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T04:28:54,990 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T04:28:54,990 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T04:28:54,990 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T04:28:54,990 INFO [RS:0;e2eaa0f11f7e:41643 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41643 2024-12-02T04:28:54,990 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs 2024-12-02T04:28:54,990 INFO [RS:1;e2eaa0f11f7e:37413 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C37413%2C1733113342217:(num 1733113344065) 2024-12-02T04:28:54,990 DEBUG [RS:1;e2eaa0f11f7e:37413 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,990 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T04:28:54,990 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T04:28:54,991 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T04:28:54,991 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T04:28:54,991 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T04:28:54,991 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T04:28:54,991 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T04:28:54,991 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T04:28:54,991 INFO [RS:1;e2eaa0f11f7e:37413 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37413 2024-12-02T04:28:54,992 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T04:28:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073741833_1009 (size=9276) 2024-12-02T04:28:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741833_1009 (size=9276) 2024-12-02T04:28:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073741833_1009 (size=9276) 2024-12-02T04:28:54,996 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/oldWALs 2024-12-02T04:28:54,996 INFO [RS:2;e2eaa0f11f7e:34291 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C34291%2C1733113342258:(num 1733113344048) 2024-12-02T04:28:54,996 DEBUG [RS:2;e2eaa0f11f7e:34291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T04:28:54,996 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T04:28:54,996 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T04:28:54,996 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T04:28:54,996 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T04:28:54,996 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T04:28:54,996 INFO [RS:2;e2eaa0f11f7e:34291 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34291 2024-12-02T04:28:55,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,37413,1733113342217 2024-12-02T04:28:55,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T04:28:55,018 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T04:28:55,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,41643,1733113342088 2024-12-02T04:28:55,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,34291,1733113342258 2024-12-02T04:28:55,027 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T04:28:55,027 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T04:28:55,035 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,34291,1733113342258] 2024-12-02T04:28:55,052 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,34291,1733113342258 already deleted, retry=false 2024-12-02T04:28:55,052 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,34291,1733113342258 expired; onlineServers=2 2024-12-02T04:28:55,052 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,41643,1733113342088] 2024-12-02T04:28:55,060 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,41643,1733113342088 already deleted, retry=false 2024-12-02T04:28:55,060 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,41643,1733113342088 expired; onlineServers=1 2024-12-02T04:28:55,060 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,37413,1733113342217] 2024-12-02T04:28:55,068 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,37413,1733113342217 already deleted, retry=false 2024-12-02T04:28:55,069 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,37413,1733113342217 expired; onlineServers=0 2024-12-02T04:28:55,069 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2eaa0f11f7e,34209,1733113341303' ***** 2024-12-02T04:28:55,069 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T04:28:55,069 INFO [M:0;e2eaa0f11f7e:34209 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T04:28:55,070 INFO [M:0;e2eaa0f11f7e:34209 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T04:28:55,070 DEBUG [M:0;e2eaa0f11f7e:34209 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T04:28:55,070 DEBUG [M:0;e2eaa0f11f7e:34209 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T04:28:55,070 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-02T04:28:55,070 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733113343584 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733113343584,5,FailOnTimeoutGroup] 2024-12-02T04:28:55,070 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733113343586 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733113343586,5,FailOnTimeoutGroup] 2024-12-02T04:28:55,071 INFO [M:0;e2eaa0f11f7e:34209 {}] hbase.ChoreService(370): Chore service for: master/e2eaa0f11f7e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T04:28:55,071 INFO [M:0;e2eaa0f11f7e:34209 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T04:28:55,071 DEBUG [M:0;e2eaa0f11f7e:34209 {}] master.HMaster(1795): Stopping service threads 2024-12-02T04:28:55,071 INFO [M:0;e2eaa0f11f7e:34209 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T04:28:55,072 INFO [M:0;e2eaa0f11f7e:34209 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T04:28:55,073 INFO [M:0;e2eaa0f11f7e:34209 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T04:28:55,073 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-02T04:28:55,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T04:28:55,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T04:28:55,082 DEBUG [M:0;e2eaa0f11f7e:34209 {}] zookeeper.ZKUtil(347): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T04:28:55,082 WARN [M:0;e2eaa0f11f7e:34209 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T04:28:55,084 INFO [M:0;e2eaa0f11f7e:34209 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/.lastflushedseqids 2024-12-02T04:28:55,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38235 is added to blk_1073742473_1649 (size=329) 2024-12-02T04:28:55,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38471 is added to blk_1073742473_1649 (size=329) 2024-12-02T04:28:55,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073742473_1649 (size=329) 2024-12-02T04:28:55,096 INFO [M:0;e2eaa0f11f7e:34209 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T04:28:55,097 INFO [M:0;e2eaa0f11f7e:34209 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T04:28:55,097 DEBUG [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T04:28:55,110 INFO [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T04:28:55,110 DEBUG [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T04:28:55,110 DEBUG [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T04:28:55,110 DEBUG [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T04:28:55,110 INFO [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=997.20 KB heapSize=1.17 MB 2024-12-02T04:28:55,110 ERROR [AsyncFSWAL-0-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T04:28:55,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:28:55,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37413-0x101959075a30002, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:28:55,136 INFO [RS:1;e2eaa0f11f7e:37413 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T04:28:55,137 INFO [RS:1;e2eaa0f11f7e:37413 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,37413,1733113342217; zookeeper connection closed. 
2024-12-02T04:28:55,137 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35c14b8e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35c14b8e 2024-12-02T04:28:55,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:28:55,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:28:55,144 INFO [RS:0;e2eaa0f11f7e:41643 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T04:28:55,144 INFO [RS:2;e2eaa0f11f7e:34291 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T04:28:55,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34291-0x101959075a30003, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:28:55,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41643-0x101959075a30001, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:28:55,144 INFO [RS:2;e2eaa0f11f7e:34291 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,34291,1733113342258; zookeeper connection closed. 2024-12-02T04:28:55,144 INFO [RS:0;e2eaa0f11f7e:41643 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,41643,1733113342088; zookeeper connection closed. 2024-12-02T04:28:55,144 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64841896 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64841896 2024-12-02T04:28:55,145 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4ce87f83 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4ce87f83 2024-12-02T04:28:55,145 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-02T04:29:00,304 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:29:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:29:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T04:29:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T04:29:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-02T04:29:01,676 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 
2024-12-02T04:29:01,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:29:01,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T04:29:01,677 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T04:29:07,181 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:29:20,565 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:29:50,565 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;e2eaa0f11f7e:34209 234 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@45aa0cf2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a93504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@f6119a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13601 Waited count: 14210 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@339c2ede Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@48663cbc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 916 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@692be441-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:42955}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 50 Waited count: 3510 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75d43116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34539): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45028 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1551 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e00630a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34539): State: TIMED_WAITING Blocked count: 111 Waited count: 2692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34539): State: TIMED_WAITING Blocked count: 123 Waited count: 2682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34539): State: TIMED_WAITING Blocked count: 134 Waited count: 2682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34539): State: TIMED_WAITING Blocked count: 120 Waited count: 2695 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34539): State: TIMED_WAITING Blocked count: 144 Waited count: 2681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@6eafca1-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:39841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 45473): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 344 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@312e90ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1567 Waited count: 1627 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1065665691) connection to localhost/127.0.0.1:34539 from jenkins): State: TIMED_WAITING Blocked count: 1410 Waited count: 1410 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1421273556-121-acceptor-0@54c5c502-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:42841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1421273556-122): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 0 Waited count: 2275 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 46489): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked 
count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 2 Waited count: 338 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ebef2e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1559 Waited count: 1630 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 478 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 151 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (qtp334392358-152): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153-acceptor-0@781f96ea-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37197}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 912 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 161 (IPC Server idle connection scanner for port 41555): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 163 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Command processor): State: WAITING Blocked count: 0 Waited count: 324 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@479128e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 167 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1497 Waited count: 1628 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 150 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 159 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 169 (IPC Server handler 0 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 170 (IPC Server handler 1 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 2 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 3 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 4 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 212 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@659dc933[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (java.util.concurrent.ThreadPoolExecutor$Worker@499b2bc2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@4bc667b2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62533): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 11 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59415c3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:62533):): State: WAITING Blocked count: 2 Waited count: 448 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71c0c84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 486 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eacd11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 249 (LeaseRenewer:jenkins@localhost:34539): State: TIMED_WAITING Blocked count: 13 Waited count: 474 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5ba988b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:62533)): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73cc5be3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69e974d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 11 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@38c6c7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 85 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@36907ab4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 162 Waited count: 664 Waiting on java.util.concurrent.Semaphore$NonfairSync@27122cc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209): State: WAITING Blocked count: 67 Waited count: 9029 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fb336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3e7a11e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@8387ef4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a69ac6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f0c8e43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4117775a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 89 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;e2eaa0f11f7e:34209): State: TIMED_WAITING Blocked count: 12 Waited count: 3936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f3e6cf8fd38.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@4a51eb11): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 379 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4517 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 133 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 9 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 16 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d4a8a99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aec8461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f0def50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 
(regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f0bcca9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (LeaseRenewer:jenkins.hfs.2@localhost:34539): State: TIMED_WAITING Blocked count: 14 Waited count: 475 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 505 (LeaseRenewer:jenkins.hfs.0@localhost:34539): State: TIMED_WAITING Blocked count: 14 Waited count: 472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (LeaseRenewer:jenkins.hfs.1@localhost:34539): State: TIMED_WAITING Blocked count: 13 Waited count: 475 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44978 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 829 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 577 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-1): State: WAITING Blocked count: 5 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-2): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1003 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 73 Waited count: 114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49152eaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 
(RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1653 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@7b67fc15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1884 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1885 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2196 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 373 Waiting on java.util.concurrent.ForkJoinPool@387ddab5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2657 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 567 Waiting on java.util.concurrent.ForkJoinPool@387ddab5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6430 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 436 Waiting on java.util.concurrent.ForkJoinPool@387ddab5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11336 (AsyncFSWAL-1-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d9929be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11339 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T04:30:20,566 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-02T04:30:50,566 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;e2eaa0f11f7e:34209 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@45aa0cf2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a93504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@13517fe6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13601 Waited count: 14211 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) 
app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@339c2ede Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@48663cbc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1036 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@692be441-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:42955}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 50 Waited count: 3510 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75d43116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34539): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 177 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50957 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1551 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e00630a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34539): State: TIMED_WAITING Blocked count: 113 Waited count: 2752 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34539): State: TIMED_WAITING Blocked count: 143 Waited count: 2742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34539): State: TIMED_WAITING Blocked count: 134 Waited count: 2742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34539): State: TIMED_WAITING Blocked count: 120 Waited count: 2755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34539): State: TIMED_WAITING Blocked 
count: 144 Waited count: 2741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 
(pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@6eafca1-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:39841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1033 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 45473): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@312e90ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1589 Waited count: 1672 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1065665691) connection to localhost/127.0.0.1:34539 from jenkins): State: TIMED_WAITING Blocked count: 1450 Waited count: 1450 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1421273556-121-acceptor-0@54c5c502-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:42841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1421273556-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 0 Waited count: 2317 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1033 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 46489): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 2 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ebef2e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1579 Waited count: 1683 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 531 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 530 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 538 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 151 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (qtp334392358-152): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153-acceptor-0@781f96ea-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37197}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1032 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 161 (IPC Server idle connection scanner for port 41555): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 163 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Command processor): State: WAITING Blocked count: 0 Waited count: 344 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@479128e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 167 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1517 Waited count: 1668 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 150 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 159 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 169 (IPC Server handler 0 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 170 (IPC Server handler 1 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 2 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 519 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 3 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 4 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 212 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@659dc933[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (java.util.concurrent.ThreadPoolExecutor$Worker@499b2bc2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: TIMED_WAITING 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@4bc667b2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62533): State: 
RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 11 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59415c3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:62533):): State: WAITING Blocked count: 2 Waited count: 453 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71c0c84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 491 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eacd11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5ba988b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:62533)): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73cc5be3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69e974d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 11 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@38c6c7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 85 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@36907ab4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 162 Waited count: 664 Waiting on java.util.concurrent.Semaphore$NonfairSync@27122cc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209): State: WAITING Blocked count: 67 Waited count: 9029 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fb336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3e7a11e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@8387ef4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a69ac6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f0c8e43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4117775a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 89 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;e2eaa0f11f7e:34209): State: TIMED_WAITING Blocked count: 12 Waited count: 3936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f3e6cf8fd38.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@4a51eb11): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 379 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5117 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 133 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7fc55e22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 9 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 16 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d4a8a99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aec8461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f0def50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f0bcca9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50980 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-1): State: WAITING Blocked count: 5 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-2): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1009 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 73 Waited count: 114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49152eaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1653 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@7b67fc15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1884 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1885 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2196 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 373 Waiting on java.util.concurrent.ForkJoinPool@387ddab5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2657 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6430 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 436 Waiting on java.util.concurrent.ForkJoinPool@387ddab5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11336 (AsyncFSWAL-1-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d9929be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11339 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T04:31:20,566 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-02T04:31:50,567 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 
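Note on the two DEBUG records above: judging from the message, HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer appears to reflectively look up a private field named "threadGroup" and treats its absence (on Hadoop releases newer than 3.2.3/3.3.4, per HBASE-27595) as non-fatal, logging it at DEBUG and carrying on. The sketch below is a minimal, hypothetical illustration of that general "reflect on a private field, tolerate its absence" pattern; it is not the actual HBase implementation, and the class and method names are invented for the example.

```java
// Hypothetical illustration only -- not the HBase FsDatasetAsyncDiskServiceFixer itself.
// Shows the kind of reflective lookup that yields the NoSuchFieldException logged above
// when the target class no longer declares the field.
import java.lang.reflect.Field;

public class ThreadGroupFieldProbe {

    // Attempts to read a field named "threadGroup" from the target object.
    // Returns null when the field is missing, mirroring the non-fatal DEBUG path in the log.
    static ThreadGroup readThreadGroupField(Object target) {
        try {
            Field f = target.getClass().getDeclaredField("threadGroup");
            f.setAccessible(true);
            return (ThreadGroup) f.get(target);
        } catch (NoSuchFieldException e) {
            // Field removed in newer versions of the target class: report it and carry on,
            // analogous to the periodic DEBUG records above.
            System.out.println("NoSuchFieldException: threadGroup; field not present on "
                + target.getClass().getName());
            return null;
        } catch (IllegalAccessException e) {
            throw new IllegalStateException(e);
        }
    }

    public static void main(String[] args) {
        // A plain Object declares no "threadGroup" field, so this exercises the missing-field path.
        System.out.println(readThreadGroupField(new Object()));
    }
}
```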
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;e2eaa0f11f7e:34209 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@45aa0cf2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a93504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5806 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@5792aecd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13601 Waited count: 14212 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@339c2ede Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@48663cbc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@692be441-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:42955}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 50 Waited count: 3510 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75d43116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34539): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 197 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56883 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1551 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e00630a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34539): State: TIMED_WAITING Blocked count: 117 Waited count: 2812 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34539): State: TIMED_WAITING Blocked count: 146 Waited count: 2803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34539): State: TIMED_WAITING Blocked count: 134 Waited count: 2802 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34539): State: TIMED_WAITING Blocked count: 120 Waited count: 2815 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34539): State: TIMED_WAITING Blocked count: 144 Waited count: 2801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@6eafca1-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:39841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 45473): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 384 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@312e90ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1611 Waited count: 1714 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 603 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1065665691) connection to localhost/127.0.0.1:34539 from jenkins): State: TIMED_WAITING Blocked count: 1497 Waited count: 1497 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1421273556-121-acceptor-0@54c5c502-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:42841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1421273556-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 0 Waited count: 2372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 46489): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 2 Waited count: 378 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ebef2e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1599 Waited count: 1726 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 598 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 151 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (qtp334392358-152): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153-acceptor-0@781f96ea-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37197}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 161 (IPC Server idle connection scanner for port 41555): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 163 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Command processor): State: WAITING Blocked count: 0 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@479128e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 167 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1537 Waited count: 1708 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 150 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 159 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 169 (IPC Server handler 0 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 170 (IPC Server handler 1 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 2 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 579 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 3 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 4 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 212 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@659dc933[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (java.util.concurrent.ThreadPoolExecutor$Worker@499b2bc2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@4bc667b2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62533): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 11 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59415c3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:62533):): State: WAITING Blocked count: 2 Waited count: 457 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71c0c84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 495 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eacd11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5ba988b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:62533)): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73cc5be3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69e974d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 11 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@38c6c7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 85 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@36907ab4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 162 Waited count: 664 Waiting on java.util.concurrent.Semaphore$NonfairSync@27122cc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209): State: WAITING Blocked count: 67 Waited count: 9029 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fb336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3e7a11e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@8387ef4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a69ac6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f0c8e43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4117775a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 89 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;e2eaa0f11f7e:34209): State: TIMED_WAITING Blocked count: 12 Waited count: 3936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f3e6cf8fd38.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@4a51eb11): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 379 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5716 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 133 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7fc55e22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57137 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 9 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 16 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d4a8a99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 
(regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aec8461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f0def50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f0bcca9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56982 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-1): State: WAITING Blocked count: 5 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-2): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1015 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 
(MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 73 Waited count: 114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49152eaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1653 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@7b67fc15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1884 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1885 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 
(RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2196 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6430 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 436 Waiting on java.util.concurrent.ForkJoinPool@387ddab5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11336 (AsyncFSWAL-1-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d9929be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11339 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11340 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:32:20,567 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:32:22,393 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=24, reuseRatio=70.59% 2024-12-02T04:32:22,395 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T04:32:30,027 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T04:32:50,567 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
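The recurring "Process Thread Dump: Automatic Stack Trace every 60 seconds" blocks above and below are printed while the test waits for the master M:0;e2eaa0f11f7e:34209 to shut down; the "Time-limited test" thread's own stack further down (sun.management.ThreadImpl.getThreadInfo -> org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo -> Threads.threadDumpingIsAlive) shows the dumps are driven by the JDK's ThreadMXBean. Below is a minimal, self-contained sketch of how the same per-thread "State / Blocked count / Waited count / Stack" layout can be reproduced with that standard API; the class name ThreadDumpSketch and the exact formatting are illustrative assumptions, not HBase code. Separately, the reuseRatio=70.59% reported in the MemStoreChunkPool statistics record above is consistent with reused / (created + reused) = 24 / (10 + 24) ~ 70.59%.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Hypothetical illustration only: print every live thread in roughly the same
// "Thread <id> (<name>): State / Blocked count / Waited count / Stack" shape
// as the periodic dumps in this log, using the standard ThreadMXBean API.
public final class ThreadDumpSketch {
  public static void main(String[] args) {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    // Integer.MAX_VALUE requests the full stack for each thread.
    ThreadInfo[] infos = mx.getThreadInfo(mx.getAllThreadIds(), Integer.MAX_VALUE);
    for (ThreadInfo info : infos) {
      if (info == null) {
        continue; // thread exited between getAllThreadIds() and getThreadInfo()
      }
      System.out.println("Thread " + info.getThreadId() + " (" + info.getThreadName() + "):");
      System.out.println("  State: " + info.getThreadState());
      System.out.println("  Blocked count: " + info.getBlockedCount());
      System.out.println("  Waited count: " + info.getWaitedCount());
      System.out.println("  Stack:");
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }
}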
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;e2eaa0f11f7e:34209 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@45aa0cf2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a93504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.CountDownLatch$Sync@4a313ab6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13601 Waited count: 14213 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@339c2ede Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@48663cbc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1276 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@692be441-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:42955}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 50 Waited count: 3510 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75d43116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34539): State: TIMED_WAITING Blocked count: 1 Waited 
count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 213 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 217 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 62810 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1551 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e00630a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34539): State: TIMED_WAITING Blocked count: 139 Waited count: 2872 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34539): State: TIMED_WAITING Blocked count: 146 Waited count: 2863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34539): State: TIMED_WAITING Blocked count: 134 Waited count: 2862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34539): State: TIMED_WAITING Blocked count: 120 Waited count: 2875 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34539): State: TIMED_WAITING Blocked count: 144 Waited count: 2861 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 319 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@6eafca1-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:39841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1273 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 45473): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 404 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@312e90ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1634 Waited count: 1768 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 638 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 638 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 663 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1065665691) connection to localhost/127.0.0.1:34539 from jenkins): State: TIMED_WAITING Blocked count: 1537 Waited count: 1537 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1421273556-121-acceptor-0@54c5c502-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:42841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1421273556-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 0 Waited count: 2412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1273 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 46489): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 2 Waited count: 398 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ebef2e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1620 Waited count: 1772 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 658 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 151 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (qtp334392358-152): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153-acceptor-0@781f96ea-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37197}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1272 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 161 (IPC Server idle connection scanner for port 41555): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 163 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Command processor): State: WAITING Blocked count: 0 Waited count: 384 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@479128e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 167 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1557 Waited count: 1748 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 150 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 159 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 169 (IPC Server handler 0 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 170 (IPC Server handler 1 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 2 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 3 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 4 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 641 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 212 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bd3e7c8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7131a8d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@659dc933[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (java.util.concurrent.ThreadPoolExecutor$Worker@499b2bc2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bb7b774 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 
(java.util.concurrent.ThreadPoolExecutor$Worker@4bc667b2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62533): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 319 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 11 Waited count: 371 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59415c3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:62533):): State: WAITING Blocked count: 2 Waited count: 461 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71c0c84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 499 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eacd11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5ba988b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 431 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:62533)): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73cc5be3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69e974d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 11 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@38c6c7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 85 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@36907ab4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 162 Waited count: 664 Waiting on java.util.concurrent.Semaphore$NonfairSync@27122cc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209): State: WAITING Blocked count: 67 Waited count: 9029 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fb336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3e7a11e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@8387ef4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a69ac6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f0c8e43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4117775a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 89 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;e2eaa0f11f7e:34209): State: TIMED_WAITING Blocked count: 12 Waited count: 3936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f3e6cf8fd38.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@4a51eb11): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 379 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6316 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 5 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 133 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7fc55e22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 9 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 16 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d4a8a99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aec8461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f0def50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f0bcca9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62986 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-1): State: WAITING Blocked count: 5 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-2): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1021 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 73 Waited count: 114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49152eaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1653 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@7b67fc15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1884 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1885 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6430 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11336 (AsyncFSWAL-1-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d9929be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11340 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11345 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T04:33:20,568 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:33:50,568 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T04:33:55,112 DEBUG [M:0;e2eaa0f11f7e:34209 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733113735097Disabling compacts and flushes for region at 1733113735097Disabling writes for close at 1733113735110 (+13 ms)Obtaining lock to block concurrent updates at 1733113735110Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733113735110Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1021137, getHeapSize=1223712, getOffHeapSize=0, getCellsCount=2672 at 1733113735110Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733114035112 (+300002 ms) 2024-12-02T04:33:55,113 WARN [M:0;e2eaa0f11f7e:34209 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4594, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4594, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-12-02T04:33:55,117 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T04:33:55,121 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-02T04:33:55,122 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-02T04:33:55,122 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 2024-12-02T04:33:55,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T04:33:55,125 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T04:33:55,125 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 2024-12-02T04:33:55,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;e2eaa0f11f7e:34209 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 47 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@45aa0cf2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a93504 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7005 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 71 Waiting on java.util.concurrent.CountDownLatch$Sync@1021ea12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13601 Waited count: 14214 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 18 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@339c2ede Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@48663cbc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1396 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@692be441-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:42955}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 50 Waited count: 3510 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75d43116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34539): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 233 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 237 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 68740 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1551 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e00630a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34539): State: TIMED_WAITING Blocked count: 141 Waited count: 2932 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34539): State: TIMED_WAITING Blocked count: 146 Waited count: 2923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34539): State: TIMED_WAITING Blocked count: 136 Waited count: 2923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34539): State: TIMED_WAITING Blocked count: 120 Waited count: 2935 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34539): State: TIMED_WAITING Blocked count: 148 Waited count: 2921 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@6eafca1-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:39841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1393 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 45473): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@312e90ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1658 Waited count: 1814 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 698 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 698 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 723 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 45473): State: TIMED_WAITING Blocked count: 0 Waited count: 704 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1065665691) connection to localhost/127.0.0.1:34539 from jenkins): State: TIMED_WAITING Blocked count: 1585 Waited count: 1585 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1421273556-121-acceptor-0@54c5c502-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:42841}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1421273556-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 0 Waited count: 2464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1393 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 46489): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 2 Waited count: 418 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ebef2e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1641 Waited count: 1814 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 46489): State: TIMED_WAITING Blocked count: 0 Waited count: 718 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 151 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (qtp334392358-152): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f3e6c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp334392358-153-acceptor-0@781f96ea-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37197}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp334392358-154): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp334392358-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1392 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 161 (IPC Server idle connection scanner for port 41555): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 163 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Command processor): State: WAITING Blocked count: 0 Waited count: 404 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@479128e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 167 (BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539): State: TIMED_WAITING Blocked count: 1577 Waited count: 1788 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 150 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 159 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 169 (IPC Server handler 0 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 170 (IPC 
Server handler 1 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 2 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 699 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 3 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 4 on default port 41555): State: TIMED_WAITING Blocked count: 0 Waited count: 701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 212 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bd3e7c8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7131a8d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@659dc933[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (java.util.concurrent.ThreadPoolExecutor$Worker@499b2bc2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bb7b774 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@4bc667b2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62533): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 11 Waited count: 376 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59415c3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:62533):): State: WAITING Blocked count: 2 Waited count: 466 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71c0c84a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 504 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eacd11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5ba988b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:62533)): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73cc5be3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69e974d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 11 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 
(NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56499ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@38c6c7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 85 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@36907ab4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 162 Waited count: 664 Waiting on java.util.concurrent.Semaphore$NonfairSync@27122cc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34209): State: WAITING Blocked count: 67 Waited count: 9029 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fb336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34209): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b8a9ffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3e7a11e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@8387ef4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a69ac6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34209): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f0c8e43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4117775a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 89 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;e2eaa0f11f7e:34209): State: TIMED_WAITING Blocked count: 12 Waited count: 3937 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1440/0x00007f3e6d23b708.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) 
app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/e2eaa0f11f7e:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@4a51eb11): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 379 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6915 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 133 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7fc55e22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (SnapshotHandlerChoreCleaner): State: 
TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 9 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 16 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d4a8a99 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aec8461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f0def50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/e2eaa0f11f7e:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f0bcca9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68988 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-1): State: WAITING Blocked count: 5 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-2): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1027 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 73 Waited count: 114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49152eaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1653 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@7b67fc15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1884 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1885 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@565c9393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11336 (AsyncFSWAL-1-hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData-prefix:e2eaa0f11f7e,34209,1733113341303): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d9929be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11340 (process reaper): State: 
TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11345 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11348 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11349 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1426/0x00007f3e6d2371c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T04:33:59,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T04:34:00,117 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-02T04:34:00,118 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T04:34:00,118 INFO [M:0;e2eaa0f11f7e:34209 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T04:34:00,119 INFO [M:0;e2eaa0f11f7e:34209 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34209 2024-12-02T04:34:00,119 INFO [M:0;e2eaa0f11f7e:34209 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T04:34:00,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34539/user/jenkins/test-data/0684ac45-34e6-c17b-bdac-c222f67a1afd/MasterData/WALs/e2eaa0f11f7e,34209,1733113341303/e2eaa0f11f7e%2C34209%2C1733113341303.1733113342836 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-12-02T04:34:00,318 INFO [M:0;e2eaa0f11f7e:34209 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T04:34:00,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:34:00,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34209-0x101959075a30000, quorum=127.0.0.1:62533, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T04:34:00,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@367f3488{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T04:34:00,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:34:00,352 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:34:00,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T04:34:00,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:34:00,355 ERROR [Command processor {}] 
datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T04:34:00,355 WARN [BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T04:34:00,355 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T04:34:00,355 WARN [BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-922013808-172.17.0.2-1733113336565 (Datanode Uuid c925c3c1-f5f6-4ac2-969a-f22abf49463f) service to localhost/127.0.0.1:34539 2024-12-02T04:34:00,357 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data5/current/BP-922013808-172.17.0.2-1733113336565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T04:34:00,357 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data6/current/BP-922013808-172.17.0.2-1733113336565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T04:34:00,358 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T04:34:00,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b9a68a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T04:34:00,360 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:34:00,360 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:34:00,361 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T04:34:00,361 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:34:00,361 WARN [BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T04:34:00,361 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T04:34:00,361 WARN [BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-922013808-172.17.0.2-1733113336565 (Datanode Uuid b92c3561-0c0c-4dd7-be73-ec7390f1dfa5) service to localhost/127.0.0.1:34539 2024-12-02T04:34:00,361 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T04:34:00,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data3/current/BP-922013808-172.17.0.2-1733113336565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T04:34:00,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data4/current/BP-922013808-172.17.0.2-1733113336565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T04:34:00,362 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T04:34:00,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64986ec5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T04:34:00,364 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:34:00,364 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:34:00,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T04:34:00,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:34:00,365 WARN [BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T04:34:00,365 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T04:34:00,365 WARN [BP-922013808-172.17.0.2-1733113336565 heartbeating to localhost/127.0.0.1:34539 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-922013808-172.17.0.2-1733113336565 (Datanode Uuid 41803efc-33d8-4437-b70d-294066f67bca) service to localhost/127.0.0.1:34539 2024-12-02T04:34:00,365 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T04:34:00,366 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data1/current/BP-922013808-172.17.0.2-1733113336565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T04:34:00,366 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/cluster_d2663f80-3021-42b8-f08e-974b96d3ef20/data/data2/current/BP-922013808-172.17.0.2-1733113336565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T04:34:00,366 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T04:34:00,371 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5996a1b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T04:34:00,371 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T04:34:00,371 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T04:34:00,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T04:34:00,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d94d7855-bc3c-a56d-db03-7a68547a0206/hadoop.log.dir/,STOPPED} 2024-12-02T04:34:00,381 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T04:34:00,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down